selinux/
access_vector_cache.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fifo_cache::FifoCache;
6use crate::policy::{AccessDecision, IoctlAccessDecision};
7use crate::sync::Mutex;
8use crate::{FsNodeClass, KernelClass, NullessByteStr, ObjectClass, SecurityId};
9use std::sync::atomic::{AtomicU64, Ordering};
10use std::sync::{Arc, Weak};
11
12pub use crate::fifo_cache::{CacheStats, HasCacheStats};
13
/// Interface used internally by the `SecurityServer` implementation to implement policy queries
/// such as looking up the set of permissions to grant, or the Security Context to apply to new
/// files, etc.
///
/// This trait allows layering of caching, delegation, and thread-safety between the policy-backed
/// calculations, and the caller-facing permission-check interface.
pub(super) trait Query {
    /// Computes the [`AccessDecision`] permitted to `source_sid` for accessing `target_sid`, an
    /// object of type `target_class`.
    fn compute_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision;

    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance
    /// created by `source_sid` in a parent directory labeled `target_sid`, when no more specific
    /// SID was determined by `compute_new_fs_node_sid_with_name()` based on the file's name.
    fn compute_new_fs_node_sid(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error>;

    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance of
    /// name `fs_node_name`, created by `source_sid` in a parent directory labeled `target_sid`.
    /// If no filename-transition rules exist for the specified `fs_node_name` then `None` is
    /// returned.
    fn compute_new_fs_node_sid_with_name(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId>;

    /// Computes the [`IoctlAccessDecision`] permitted to `source_sid` for accessing `target_sid`,
    /// an object of type `target_class`, for ioctls with high byte `ioctl_prefix`.
    fn compute_ioctl_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision;
}
63
64/// An interface for computing the rights permitted to a source accessing a target of a particular
65/// SELinux object type.
66pub trait QueryMut {
67    /// Computes the [`AccessDecision`] permitted to `source_sid` for accessing `target_sid`, an
68    /// object of type `target_class`.
69    fn compute_access_decision(
70        &mut self,
71        source_sid: SecurityId,
72        target_sid: SecurityId,
73        target_class: ObjectClass,
74    ) -> AccessDecision;
75
76    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance
77    /// created by `source_sid` in a parent directory labeled `target_sid` should be labeled,
78    /// if no more specific SID was specified by `compute_new_fs_node_sid_with_name()`, based on
79    /// the file's name.
80    fn compute_new_fs_node_sid(
81        &mut self,
82        source_sid: SecurityId,
83        target_sid: SecurityId,
84        fs_node_class: FsNodeClass,
85    ) -> Result<SecurityId, anyhow::Error>;
86
87    /// Returns the security identifier (SID) with which to label a new `fs_node_class` instance of
88    /// name `fs_node_name`, created by `source_sid` in a parent directory labeled `target_sid`.
89    /// If no filename-transition rules exist for the specified `fs_node_name` then `None` is
90    /// returned.
91    fn compute_new_fs_node_sid_with_name(
92        &mut self,
93        source_sid: SecurityId,
94        target_sid: SecurityId,
95        fs_node_class: FsNodeClass,
96        fs_node_name: NullessByteStr<'_>,
97    ) -> Option<SecurityId>;
98
99    /// Computes the [`IoctlAccessDecision`] permitted to `source_sid` for accessing `target_sid`,
100    /// an object of of type `target_class`, for ioctls with high byte `ioctl_prefix`.
101    fn compute_ioctl_access_decision(
102        &mut self,
103        source_sid: SecurityId,
104        target_sid: SecurityId,
105        target_class: ObjectClass,
106        ioctl_prefix: u8,
107    ) -> IoctlAccessDecision;
108}
109
110impl<Q: Query> QueryMut for Q {
111    fn compute_access_decision(
112        &mut self,
113        source_sid: SecurityId,
114        target_sid: SecurityId,
115        target_class: ObjectClass,
116    ) -> AccessDecision {
117        (self as &dyn Query).compute_access_decision(source_sid, target_sid, target_class)
118    }
119
120    fn compute_new_fs_node_sid(
121        &mut self,
122        source_sid: SecurityId,
123        target_sid: SecurityId,
124        fs_node_class: FsNodeClass,
125    ) -> Result<SecurityId, anyhow::Error> {
126        (self as &dyn Query).compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
127    }
128
129    fn compute_new_fs_node_sid_with_name(
130        &mut self,
131        source_sid: SecurityId,
132        target_sid: SecurityId,
133        fs_node_class: FsNodeClass,
134        fs_node_name: NullessByteStr<'_>,
135    ) -> Option<SecurityId> {
136        (self as &dyn Query).compute_new_fs_node_sid_with_name(
137            source_sid,
138            target_sid,
139            fs_node_class,
140            fs_node_name,
141        )
142    }
143
144    fn compute_ioctl_access_decision(
145        &mut self,
146        source_sid: SecurityId,
147        target_sid: SecurityId,
148        target_class: ObjectClass,
149        ioctl_prefix: u8,
150    ) -> IoctlAccessDecision {
151        (self as &dyn Query).compute_ioctl_access_decision(
152            source_sid,
153            target_sid,
154            target_class,
155            ioctl_prefix,
156        )
157    }
158}
159
/// An interface for emptying caches that store [`Query`] input/output pairs. This interface
/// requires implementers to update state via interior mutability (it takes `&self`), so resets
/// can be requested from any thread.
pub(super) trait Reset {
    /// Removes all entries from this cache and any reset delegate caches encapsulated in this
    /// cache. Returns true only if the cache is still valid after reset.
    fn reset(&self) -> bool;
}
167
/// An interface for emptying caches that store [`Query`] input/output pairs. Mirrors [`Reset`]
/// but takes `&mut self`, for thread-hostile caches.
pub(super) trait ResetMut {
    /// Removes all entries from this cache and any reset delegate caches encapsulated in this
    /// cache. Returns true only if the cache is still valid after reset.
    fn reset(&mut self) -> bool;
}
174
175impl<R: Reset> ResetMut for R {
176    fn reset(&mut self) -> bool {
177        (self as &dyn Reset).reset()
178    }
179}
180
/// An interface for objects that wrap a swappable delegate of type `D`.
pub(super) trait ProxyMut<D> {
    /// Installs `delegate` as the new delegate, returning the previously installed one.
    fn set_delegate(&mut self, delegate: D) -> D;
}
184
/// Cache key for access-decision queries: the (source, target, class) triple.
#[derive(Clone, Hash, PartialEq, Eq)]
struct AccessQueryArgs {
    source_sid: SecurityId,
    target_sid: SecurityId,
    target_class: ObjectClass,
}
191
/// Cache value for access-decision queries.
#[derive(Clone)]
struct AccessQueryResult {
    access_decision: AccessDecision,
    // Memoized label for new file nodes under the same key; populated lazily by
    // `compute_new_fs_node_sid()` rather than at insertion time.
    new_file_sid: Option<SecurityId>,
}
197
/// Cache key for ioctl access-decision queries: the access-decision key plus the ioctl prefix
/// (the high byte of the ioctl number).
#[derive(Clone, Hash, PartialEq, Eq)]
struct IoctlAccessQueryArgs {
    source_sid: SecurityId,
    target_sid: SecurityId,
    target_class: ObjectClass,
    ioctl_prefix: u8,
}
205
/// Thread-hostile associative cache with capacity defined at construction and FIFO eviction.
/// Holds two independent partitions: one for access decisions (which also memoize new-file
/// SIDs) and a smaller one for ioctl access decisions. Misses fall through to `delegate`.
pub(super) struct FifoQueryCache<D> {
    access_cache: FifoCache<AccessQueryArgs, AccessQueryResult>,
    ioctl_access_cache: FifoCache<IoctlAccessQueryArgs, IoctlAccessDecision>,
    delegate: D,
}
212
impl<D> FifoQueryCache<D> {
    // The multiplier used to compute the ioctl access cache capacity from the main cache capacity.
    const IOCTL_CAPACITY_MULTIPLIER: f32 = 0.25;

    /// Constructs a fixed-size access vector cache that delegates to `delegate`.
    ///
    /// # Panics
    ///
    /// This will panic if called with a `capacity` of zero, or with a `capacity` small enough
    /// (less than 4, given the 0.25 multiplier) that the derived ioctl partition capacity
    /// truncates to zero.
    pub fn new(delegate: D, capacity: usize) -> Self {
        assert!(capacity > 0, "cannot instantiate fixed access vector cache of size 0");
        // The float-to-int conversion truncates toward zero, hence the assert below.
        let ioctl_access_cache_capacity =
            (Self::IOCTL_CAPACITY_MULTIPLIER * (capacity as f32)) as usize;
        assert!(
            ioctl_access_cache_capacity > 0,
            "cannot instantiate ioctl cache partition of size 0"
        );

        Self {
            // NOTE(review): the original comment claimed "`capacity` plus one element
            // working-space", but `with_capacity()` is passed `capacity` unchanged —
            // presumably `FifoCache` reserves its own insert-then-evict working space
            // internally; confirm against `FifoCache::with_capacity`.
            access_cache: FifoCache::with_capacity(capacity),
            ioctl_access_cache: FifoCache::with_capacity(ioctl_access_cache_capacity),
            delegate,
        }
    }

    /// Returns true if the main access decision cache has reached capacity.
    #[cfg(test)]
    fn access_cache_is_full(&self) -> bool {
        self.access_cache.is_full()
    }

    /// Returns true if the ioctl access decision cache has reached capacity.
    #[cfg(test)]
    fn ioctl_access_cache_is_full(&self) -> bool {
        self.ioctl_access_cache.is_full()
    }
}
252
impl<D: QueryMut> QueryMut for FifoQueryCache<D> {
    /// Serves access decisions from `access_cache`, consulting the delegate only on a miss.
    fn compute_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision {
        let query_args =
            AccessQueryArgs { source_sid, target_sid, target_class: target_class.clone() };
        if let Some(result) = self.access_cache.get(&query_args) {
            return result.access_decision.clone();
        }

        let access_decision =
            self.delegate.compute_access_decision(source_sid, target_sid, target_class);

        // `new_file_sid` starts empty; `compute_new_fs_node_sid()` fills it in lazily for the
        // same (source, target, class) key when first asked.
        self.access_cache.insert(
            query_args,
            AccessQueryResult { access_decision: access_decision.clone(), new_file_sid: None },
        );

        access_decision
    }

    /// Computes (and memoizes) the label for new file nodes. The memoized SID shares a cache
    /// entry with the access decision for the same (source, target, class) key, so a miss here
    /// also populates the access-decision side of the entry.
    fn compute_new_fs_node_sid(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        let target_class = ObjectClass::Kernel(KernelClass::from(fs_node_class));

        let query_args =
            AccessQueryArgs { source_sid, target_sid, target_class: target_class.clone() };
        // Find the existing cache entry, or create one by consulting the delegate.
        let query_result = if let Some(result) = self.access_cache.get(&query_args) {
            result
        } else {
            let access_decision =
                self.delegate.compute_access_decision(source_sid, target_sid, target_class);
            self.access_cache
                .insert(query_args, AccessQueryResult { access_decision, new_file_sid: None })
        };

        if let Some(new_file_sid) = query_result.new_file_sid {
            Ok(new_file_sid)
        } else {
            let new_file_sid =
                self.delegate.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class);
            // Only successful computations are memoized; an error is returned to the caller
            // and the computation retried on the next call.
            if let Ok(new_file_sid) = new_file_sid {
                query_result.new_file_sid = Some(new_file_sid);
            }
            new_file_sid
        }
    }

    /// Not cached: forwarded directly to the delegate.
    fn compute_new_fs_node_sid_with_name(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        self.delegate.compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    /// Serves ioctl access decisions from the separate `ioctl_access_cache` partition,
    /// consulting the delegate only on a miss.
    fn compute_ioctl_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        let query_args = IoctlAccessQueryArgs {
            source_sid,
            target_sid,
            target_class: target_class.clone(),
            ioctl_prefix,
        };
        if let Some(result) = self.ioctl_access_cache.get(&query_args) {
            return result.clone();
        }

        let ioctl_access_decision = self.delegate.compute_ioctl_access_decision(
            source_sid,
            target_sid,
            target_class,
            ioctl_prefix,
        );

        self.ioctl_access_cache.insert(query_args, ioctl_access_decision.clone());

        ioctl_access_decision
    }
}
352
353impl<D> HasCacheStats for FifoQueryCache<D> {
354    fn cache_stats(&self) -> CacheStats {
355        &self.access_cache.cache_stats() + &self.ioctl_access_cache.cache_stats()
356    }
357}
358
359impl<D> ResetMut for FifoQueryCache<D> {
360    fn reset(&mut self) -> bool {
361        self.access_cache = FifoCache::with_capacity(self.access_cache.capacity());
362        self.ioctl_access_cache = FifoCache::with_capacity(self.ioctl_access_cache.capacity());
363        true
364    }
365}
366
367impl<D> ProxyMut<D> for FifoQueryCache<D> {
368    fn set_delegate(&mut self, mut delegate: D) -> D {
369        std::mem::swap(&mut self.delegate, &mut delegate);
370        delegate
371    }
372}
373
/// A locked access vector cache: wraps a thread-hostile [`QueryMut`] delegate behind an
/// `Arc<Mutex<_>>` so it can be shared and queried from any thread. Clones share the same
/// underlying delegate.
pub(super) struct Locked<D> {
    delegate: Arc<Mutex<D>>,
}
378
379impl<D> Clone for Locked<D> {
380    fn clone(&self) -> Self {
381        Self { delegate: self.delegate.clone() }
382    }
383}
384
385impl<D> Locked<D> {
386    /// Constructs a locked access vector cache that delegates to `delegate`.
387    pub fn new(delegate: D) -> Self {
388        Self { delegate: Arc::new(Mutex::new(delegate)) }
389    }
390}
391
392impl<D: QueryMut> Query for Locked<D> {
393    fn compute_access_decision(
394        &self,
395        source_sid: SecurityId,
396        target_sid: SecurityId,
397        target_class: ObjectClass,
398    ) -> AccessDecision {
399        self.delegate.lock().compute_access_decision(source_sid, target_sid, target_class)
400    }
401
402    fn compute_new_fs_node_sid(
403        &self,
404        source_sid: SecurityId,
405        target_sid: SecurityId,
406        fs_node_class: FsNodeClass,
407    ) -> Result<SecurityId, anyhow::Error> {
408        self.delegate.lock().compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
409    }
410
411    fn compute_new_fs_node_sid_with_name(
412        &self,
413        source_sid: SecurityId,
414        target_sid: SecurityId,
415        fs_node_class: FsNodeClass,
416        fs_node_name: NullessByteStr<'_>,
417    ) -> Option<SecurityId> {
418        self.delegate.lock().compute_new_fs_node_sid_with_name(
419            source_sid,
420            target_sid,
421            fs_node_class,
422            fs_node_name,
423        )
424    }
425
426    fn compute_ioctl_access_decision(
427        &self,
428        source_sid: SecurityId,
429        target_sid: SecurityId,
430        target_class: ObjectClass,
431        ioctl_prefix: u8,
432    ) -> IoctlAccessDecision {
433        self.delegate.lock().compute_ioctl_access_decision(
434            source_sid,
435            target_sid,
436            target_class,
437            ioctl_prefix,
438        )
439    }
440}
441
442impl<D: HasCacheStats> HasCacheStats for Locked<D> {
443    fn cache_stats(&self) -> CacheStats {
444        self.delegate.lock().cache_stats()
445    }
446}
447
448impl<D: ResetMut> Reset for Locked<D> {
449    fn reset(&self) -> bool {
450        self.delegate.lock().reset()
451    }
452}
453
impl<D> Locked<D> {
    /// Swaps in `delegate` as the wrapped cache's new delegate, returning the previous one.
    /// The swap happens under the lock, so concurrent queries observe either the old or the
    /// new delegate, never a mixture.
    pub fn set_stateful_cache_delegate<PD>(&self, delegate: PD) -> PD
    where
        D: ProxyMut<PD>,
    {
        self.delegate.lock().set_delegate(delegate)
    }
}
462
/// A wrapper around an atomic integer that implements [`Reset`]. Instances of this type are used as
/// a version number to indicate when a cache needs to be emptied: observers record the version
/// they last saw and treat any later value as a pending reset.
#[derive(Default)]
pub struct AtomicVersion(AtomicU64);
467
468impl AtomicVersion {
469    /// Atomically load the version number.
470    pub fn version(&self) -> u64 {
471        self.0.load(Ordering::Relaxed)
472    }
473
474    /// Atomically increment the version number.
475    pub fn increment_version(&self) {
476        self.0.fetch_add(1, Ordering::Relaxed);
477    }
478}
479
impl Reset for AtomicVersion {
    /// "Resetting" a version counter just bumps it; caches comparing their recorded version
    /// against it will lazily flush themselves. The counter itself always remains valid.
    fn reset(&self) -> bool {
        self.increment_version();
        true
    }
}
486
487impl<Q: Query> Query for Arc<Q> {
488    fn compute_access_decision(
489        &self,
490        source_sid: SecurityId,
491        target_sid: SecurityId,
492        target_class: ObjectClass,
493    ) -> AccessDecision {
494        self.as_ref().compute_access_decision(source_sid, target_sid, target_class)
495    }
496
497    fn compute_new_fs_node_sid(
498        &self,
499        source_sid: SecurityId,
500        target_sid: SecurityId,
501        fs_node_class: FsNodeClass,
502    ) -> Result<SecurityId, anyhow::Error> {
503        self.as_ref().compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
504    }
505
506    fn compute_new_fs_node_sid_with_name(
507        &self,
508        source_sid: SecurityId,
509        target_sid: SecurityId,
510        fs_node_class: FsNodeClass,
511        fs_node_name: NullessByteStr<'_>,
512    ) -> Option<SecurityId> {
513        self.as_ref().compute_new_fs_node_sid_with_name(
514            source_sid,
515            target_sid,
516            fs_node_class,
517            fs_node_name,
518        )
519    }
520
521    fn compute_ioctl_access_decision(
522        &self,
523        source_sid: SecurityId,
524        target_sid: SecurityId,
525        target_class: ObjectClass,
526        ioctl_prefix: u8,
527    ) -> IoctlAccessDecision {
528        self.as_ref().compute_ioctl_access_decision(
529            source_sid,
530            target_sid,
531            target_class,
532            ioctl_prefix,
533        )
534    }
535}
536
537impl<R: Reset> Reset for Arc<R> {
538    fn reset(&self) -> bool {
539        self.as_ref().reset()
540    }
541}
542
543impl<Q: Query> Query for Weak<Q> {
544    fn compute_access_decision(
545        &self,
546        source_sid: SecurityId,
547        target_sid: SecurityId,
548        target_class: ObjectClass,
549    ) -> AccessDecision {
550        self.upgrade()
551            .map(|q| q.compute_access_decision(source_sid, target_sid, target_class))
552            .unwrap_or_default()
553    }
554
555    fn compute_new_fs_node_sid(
556        &self,
557        source_sid: SecurityId,
558        target_sid: SecurityId,
559        fs_node_class: FsNodeClass,
560    ) -> Result<SecurityId, anyhow::Error> {
561        self.upgrade()
562            .map(|q| q.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class))
563            .unwrap_or(Err(anyhow::anyhow!("weak reference failed to resolve")))
564    }
565
566    fn compute_new_fs_node_sid_with_name(
567        &self,
568        source_sid: SecurityId,
569        target_sid: SecurityId,
570        fs_node_class: FsNodeClass,
571        fs_node_name: NullessByteStr<'_>,
572    ) -> Option<SecurityId> {
573        let delegate = self.upgrade()?;
574        delegate.compute_new_fs_node_sid_with_name(
575            source_sid,
576            target_sid,
577            fs_node_class,
578            fs_node_name,
579        )
580    }
581
582    fn compute_ioctl_access_decision(
583        &self,
584        source_sid: SecurityId,
585        target_sid: SecurityId,
586        target_class: ObjectClass,
587        ioctl_prefix: u8,
588    ) -> IoctlAccessDecision {
589        self.upgrade()
590            .map(|q| {
591                q.compute_ioctl_access_decision(source_sid, target_sid, target_class, ioctl_prefix)
592            })
593            .unwrap_or(IoctlAccessDecision::DENY_ALL)
594    }
595}
596
597impl<R: Reset> Reset for Weak<R> {
598    fn reset(&self) -> bool {
599        self.upgrade().as_deref().map(Reset::reset).unwrap_or(false)
600    }
601}
602
/// An access vector cache that may be reset from any thread, but expects to always be queried
/// from the same thread. The cache does not implement any specific caching strategies, but
/// delegates *all* operations.
///
/// Resets are delegated lazily during queries.  A `reset()` induces an internal state change that
/// results in at most one `reset()` call to the query delegate on the next query. This strategy
/// allows [`ThreadLocalQuery`] to expose thread-safe reset implementation over thread-hostile
/// access vector cache implementations.
pub(super) struct ThreadLocalQuery<D> {
    // The thread-hostile cache/delegate that actually serves queries.
    delegate: D,
    // The shared version observed by this instance when its delegate was last reset.
    current_version: u64,
    // Shared version counter; a mismatch with `current_version` signals a pending reset.
    active_version: Arc<AtomicVersion>,
}
616
617impl<D> ThreadLocalQuery<D> {
618    /// Constructs a [`ThreadLocalQuery`] that delegates to `delegate`.
619    pub fn new(active_version: Arc<AtomicVersion>, delegate: D) -> Self {
620        Self { delegate, current_version: Default::default(), active_version }
621    }
622}
623
624impl<D: QueryMut + ResetMut> QueryMut for ThreadLocalQuery<D> {
625    fn compute_access_decision(
626        &mut self,
627        source_sid: SecurityId,
628        target_sid: SecurityId,
629        target_class: ObjectClass,
630    ) -> AccessDecision {
631        let version = self.active_version.as_ref().version();
632        if self.current_version != version {
633            self.current_version = version;
634            self.delegate.reset();
635        }
636
637        // Allow `self.delegate` to implement caching strategy and prepare response.
638        self.delegate.compute_access_decision(source_sid, target_sid, target_class)
639    }
640
641    fn compute_new_fs_node_sid(
642        &mut self,
643        source_sid: SecurityId,
644        target_sid: SecurityId,
645        fs_node_class: FsNodeClass,
646    ) -> Result<SecurityId, anyhow::Error> {
647        let version = self.active_version.as_ref().version();
648        if self.current_version != version {
649            self.current_version = version;
650            self.delegate.reset();
651        }
652
653        // Allow `self.delegate` to implement caching strategy and prepare response.
654        self.delegate.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
655    }
656
657    fn compute_new_fs_node_sid_with_name(
658        &mut self,
659        source_sid: SecurityId,
660        target_sid: SecurityId,
661        fs_node_class: FsNodeClass,
662        fs_node_name: NullessByteStr<'_>,
663    ) -> Option<SecurityId> {
664        // Allow `self.delegate` to implement caching strategy and prepare response.
665        self.delegate.compute_new_fs_node_sid_with_name(
666            source_sid,
667            target_sid,
668            fs_node_class,
669            fs_node_name,
670        )
671    }
672
673    fn compute_ioctl_access_decision(
674        &mut self,
675        source_sid: SecurityId,
676        target_sid: SecurityId,
677        target_class: ObjectClass,
678        ioctl_prefix: u8,
679    ) -> IoctlAccessDecision {
680        self.delegate.compute_ioctl_access_decision(
681            source_sid,
682            target_sid,
683            target_class,
684            ioctl_prefix,
685        )
686    }
687}
688
/// Default size of an access vector cache shared by all threads in the system.
const DEFAULT_SHARED_SIZE: usize = 1000;

/// Default size of a thread-local access vector cache.
///
/// Kept small: a thread-local cache only absorbs a thread's hot repeated queries, while
/// misses fall through to the shared cache and, ultimately, the security server.
const DEFAULT_THREAD_LOCAL_SIZE: usize = 10;
694
/// Composite access vector cache manager that delegates queries to security server type, `SS`, and
/// owns a shared cache of size `DEFAULT_SHARED_SIZE`, and can produce thread-local caches of size
/// `DEFAULT_THREAD_LOCAL_SIZE`.
pub(super) struct Manager<SS> {
    // Process-wide cache; thread-local caches delegate to it on a miss.
    shared_cache: Locked<FifoQueryCache<Weak<SS>>>,
    // Version counter observed by all thread-local caches produced by this manager.
    thread_local_version: Arc<AtomicVersion>,
}
702
703impl<SS> Manager<SS> {
704    /// Constructs a [`Manager`] that initially has no security server delegate (i.e., will default
705    /// to deny all requests).
706    pub fn new() -> Self {
707        Self {
708            shared_cache: Locked::new(FifoQueryCache::new(Weak::<SS>::new(), DEFAULT_SHARED_SIZE)),
709            thread_local_version: Arc::new(AtomicVersion::default()),
710        }
711    }
712
713    /// Sets the security server delegate that is consulted when there is no cache hit on a query.
714    pub fn set_security_server(&self, security_server: Weak<SS>) -> Weak<SS> {
715        self.shared_cache.set_stateful_cache_delegate(security_server)
716    }
717
718    /// Returns a shared reference to the shared cache managed by this manager. This operation does
719    /// not copy the cache, but it does perform an atomic operation to update a reference count.
720    pub fn get_shared_cache(&self) -> &Locked<FifoQueryCache<Weak<SS>>> {
721        &self.shared_cache
722    }
723
724    /// Constructs a new thread-local cache that will delegate to the shared cache managed by this
725    /// manager (which, in turn, delegates to its security server).
726    pub fn new_thread_local_cache(
727        &self,
728    ) -> ThreadLocalQuery<FifoQueryCache<Locked<FifoQueryCache<Weak<SS>>>>> {
729        ThreadLocalQuery::new(
730            self.thread_local_version.clone(),
731            FifoQueryCache::new(self.shared_cache.clone(), DEFAULT_THREAD_LOCAL_SIZE),
732        )
733    }
734}
735
impl<SS> Reset for Manager<SS> {
    /// Resets caches owned by this manager. If owned caches delegate to a security server that is
    /// reloading its policy, the security server must reload its policy (and start serving the new
    /// policy) *before* invoking `Manager::reset()` on any managers that delegate to that security
    /// server. This is because the [`Manager`]-managed caches are consulted by [`Query`] clients
    /// *before* the security server; performing reload/reset in the reverse order could move stale
    /// queries into reset caches before policy reload is complete.
    fn reset(&self) -> bool {
        // Layered cache stale entries avoided only if shared cache reset first, then thread-local
        // caches are reset. This is because thread-local caches are consulted by `Query` clients
        // before the shared cache; performing reset in the reverse order could move stale queries
        // into reset caches. (Thread-local resets are applied lazily, on each thread's next
        // query, via the bumped version counter.)
        self.shared_cache.reset();
        self.thread_local_version.reset();
        true
    }
}
753
/// Test constants and helpers shared by `tests` and `starnix_tests`.
#[cfg(test)]
mod testing {
    use crate::SecurityId;

    use std::num::NonZeroU32;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::sync::LazyLock;

    /// SID to use where any value will do.
    pub(super) static A_TEST_SID: LazyLock<SecurityId> = LazyLock::new(unique_sid);

    /// Default fixed cache capacity to request in tests.
    pub(super) const TEST_CAPACITY: usize = 10;

    /// Returns a new `SecurityId` with unique id.
    pub(super) fn unique_sid() -> SecurityId {
        // Ids start at 1000 and only increase, so the `NonZeroU32` conversion cannot fail
        // short of u32 wraparound (not reachable in tests).
        static NEXT_ID: AtomicU32 = AtomicU32::new(1000);
        SecurityId(NonZeroU32::new(NEXT_ID.fetch_add(1, Ordering::AcqRel)).unwrap())
    }

    /// Returns a vector of `count` unique `SecurityIds`.
    pub(super) fn unique_sids(count: usize) -> Vec<SecurityId> {
        (0..count).map(|_| unique_sid()).collect()
    }
}
780
781#[cfg(test)]
782mod tests {
783    use super::testing::*;
784    use super::*;
785    use crate::policy::{AccessVector, XpermsBitmap};
786    use crate::KernelClass;
787
788    use std::sync::atomic::AtomicUsize;
789
    /// No-op policy query delegate that allows all permissions and maintains no internal state, for testing.
    #[derive(Default)]
    struct TestDelegate {
        // Number of `compute_*` queries that reached this delegate (i.e., cache misses).
        query_count: AtomicUsize,
        // Number of times `reset()` was invoked on this delegate.
        reset_count: AtomicUsize,
    }
796
    impl TestDelegate {
        /// Returns the number of queries that reached this delegate.
        fn query_count(&self) -> usize {
            self.query_count.load(Ordering::Relaxed)
        }

        /// Returns the number of times this delegate has been reset.
        fn reset_count(&self) -> usize {
            self.reset_count.load(Ordering::Relaxed)
        }
    }
806
    impl Query for TestDelegate {
        /// Counts the query and allows every permission.
        fn compute_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
        ) -> AccessDecision {
            self.query_count.fetch_add(1, Ordering::Relaxed);
            AccessDecision::allow(AccessVector::ALL)
        }

        /// Not exercised by these tests.
        fn compute_new_fs_node_sid(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
        ) -> Result<SecurityId, anyhow::Error> {
            unreachable!()
        }

        /// Not exercised by these tests.
        fn compute_new_fs_node_sid_with_name(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
            _fs_node_name: NullessByteStr<'_>,
        ) -> Option<SecurityId> {
            unreachable!()
        }

        /// Counts the query and allows every ioctl.
        fn compute_ioctl_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
            _ioctl_prefix: u8,
        ) -> IoctlAccessDecision {
            self.query_count.fetch_add(1, Ordering::Relaxed);
            IoctlAccessDecision::ALLOW_ALL
        }
    }
848
    impl Reset for TestDelegate {
        /// Counts the reset; the delegate itself always remains valid.
        fn reset(&self) -> bool {
            self.reset_count.fetch_add(1, Ordering::Relaxed);
            true
        }
    }
855
    #[test]
    fn fixed_access_vector_cache_add_entry() {
        // The first query misses (the delegate is consulted once); the identical second query
        // must be served from the cache without consulting the delegate again.
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);
        assert_eq!(0, avc.delegate.query_count());
        assert_eq!(
            AccessVector::ALL,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(
            AccessVector::ALL,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(false, avc.access_cache_is_full());
    }
882
    #[test]
    fn fixed_access_vector_cache_reset() {
        // Resetting an empty or partially-filled cache leaves it valid, empty, and usable.
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());

        assert_eq!(0, avc.delegate.query_count());
        assert_eq!(
            AccessVector::ALL,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(false, avc.access_cache_is_full());

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());
    }
906
907    #[test]
908    fn fixed_access_vector_cache_fill() {
909        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);
910
911        for sid in unique_sids(avc.access_cache.capacity()) {
912            avc.compute_access_decision(sid, A_TEST_SID.clone(), KernelClass::Process.into());
913        }
914        assert_eq!(true, avc.access_cache_is_full());
915
916        avc.reset();
917        assert_eq!(false, avc.access_cache_is_full());
918
919        for sid in unique_sids(avc.access_cache.capacity()) {
920            avc.compute_access_decision(A_TEST_SID.clone(), sid, KernelClass::Process.into());
921        }
922        assert_eq!(true, avc.access_cache_is_full());
923
924        avc.reset();
925        assert_eq!(false, avc.access_cache_is_full());
926    }
927
928    #[test]
929    fn fixed_access_vector_cache_full_miss() {
930        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);
931
932        // Make the test query, which will trivially miss.
933        avc.compute_access_decision(
934            A_TEST_SID.clone(),
935            A_TEST_SID.clone(),
936            KernelClass::Process.into(),
937        );
938        assert!(!avc.access_cache_is_full());
939
940        // Fill the cache with new queries, which should evict the test query.
941        for sid in unique_sids(avc.access_cache.capacity()) {
942            avc.compute_access_decision(sid, A_TEST_SID.clone(), KernelClass::Process.into());
943        }
944        assert!(avc.access_cache_is_full());
945
946        // Making the test query should result in another miss.
947        let delegate_query_count = avc.delegate.query_count();
948        avc.compute_access_decision(
949            A_TEST_SID.clone(),
950            A_TEST_SID.clone(),
951            KernelClass::Process.into(),
952        );
953        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());
954
955        // Because the cache is not LRU, making `capacity()` unique queries, each preceded by
956        // the test query, will still result in the test query result being evicted.
957        // Each test query will hit, and the interleaved queries will miss, with the final of the
958        // interleaved queries evicting the test query.
959        for sid in unique_sids(avc.access_cache.capacity()) {
960            avc.compute_access_decision(
961                A_TEST_SID.clone(),
962                A_TEST_SID.clone(),
963                KernelClass::Process.into(),
964            );
965            avc.compute_access_decision(sid, A_TEST_SID.clone(), KernelClass::Process.into());
966        }
967
968        // The test query should now miss.
969        let delegate_query_count = avc.delegate.query_count();
970        avc.compute_access_decision(
971            A_TEST_SID.clone(),
972            A_TEST_SID.clone(),
973            KernelClass::Process.into(),
974        );
975        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());
976    }
977
978    #[test]
979    fn thread_local_query_access_vector_cache_reset() {
980        let cache_version = Arc::new(AtomicVersion::default());
981        let mut avc = ThreadLocalQuery::new(cache_version.clone(), TestDelegate::default());
982
983        // Reset deferred to next query.
984        assert_eq!(0, avc.delegate.reset_count());
985        cache_version.reset();
986        assert_eq!(0, avc.delegate.reset_count());
987        avc.compute_access_decision(
988            A_TEST_SID.clone(),
989            A_TEST_SID.clone(),
990            KernelClass::Process.into(),
991        );
992        assert_eq!(1, avc.delegate.reset_count());
993    }
994
995    #[test]
996    fn access_vector_cache_ioctl_hit() {
997        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);
998        assert_eq!(0, avc.delegate.query_count());
999        assert_eq!(
1000            XpermsBitmap::ALL,
1001            avc.compute_ioctl_access_decision(
1002                A_TEST_SID.clone(),
1003                A_TEST_SID.clone(),
1004                KernelClass::Process.into(),
1005                0x0,
1006            )
1007            .allow
1008        );
1009        assert_eq!(1, avc.delegate.query_count());
1010        // The second request for the same key is a cache hit.
1011        assert_eq!(
1012            XpermsBitmap::ALL,
1013            avc.compute_ioctl_access_decision(
1014                A_TEST_SID.clone(),
1015                A_TEST_SID.clone(),
1016                KernelClass::Process.into(),
1017                0x0
1018            )
1019            .allow
1020        );
1021        assert_eq!(1, avc.delegate.query_count());
1022    }
1023
1024    #[test]
1025    fn access_vector_cache_ioctl_miss() {
1026        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);
1027
1028        // Make the test query, which will trivially miss.
1029        avc.compute_ioctl_access_decision(
1030            A_TEST_SID.clone(),
1031            A_TEST_SID.clone(),
1032            KernelClass::Process.into(),
1033            0x0,
1034        );
1035
1036        // Fill the ioctl cache with new queries, which should evict the test query.
1037        for ioctl_prefix in 0x1..(1 + avc.ioctl_access_cache.capacity())
1038            .try_into()
1039            .expect("assumed that test ioctl cache capacity was < 255")
1040        {
1041            avc.compute_ioctl_access_decision(
1042                A_TEST_SID.clone(),
1043                A_TEST_SID.clone(),
1044                KernelClass::Process.into(),
1045                ioctl_prefix,
1046            );
1047        }
1048        // Make sure that we've fulfilled at least one new cache miss since the original test query,
1049        // and that the cache is now full.
1050        assert!(avc.delegate.query_count() > 1);
1051        assert!(avc.ioctl_access_cache_is_full());
1052        let delegate_query_count = avc.delegate.query_count();
1053
1054        // Making the original test query again should result in another miss.
1055        avc.compute_ioctl_access_decision(
1056            A_TEST_SID.clone(),
1057            A_TEST_SID.clone(),
1058            KernelClass::Process.into(),
1059            0x0,
1060        );
1061        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());
1062    }
1063}
1064
1065/// Async tests that depend on `fuchsia::test` only run in starnix.
1066#[cfg(test)]
1067#[cfg(feature = "selinux_starnix")]
1068mod starnix_tests {
1069    use super::testing::*;
1070    use super::*;
1071    use crate::policy::testing::{ACCESS_VECTOR_0001, ACCESS_VECTOR_0010};
1072    use crate::policy::AccessVector;
1073    use crate::KernelClass;
1074
1075    use rand::distributions::Uniform;
1076    use rand::{thread_rng, Rng as _};
1077    use std::collections::{HashMap, HashSet};
1078    use std::sync::atomic::AtomicU32;
1079    use std::thread::spawn;
1080
    // Policy values cycled through by the coherence tests; stored in the shared
    // `Arc<AtomicU32>` driving the test `Query` implementations below.
    const NO_RIGHTS: u32 = 0;
    const READ_RIGHTS: u32 = 1;
    const WRITE_RIGHTS: u32 = 2;

    // Access decisions returned for `READ_RIGHTS` / `WRITE_RIGHTS` respectively.
    const ACCESS_VECTOR_READ: AccessDecision = AccessDecision::allow(ACCESS_VECTOR_0001);
    const ACCESS_VECTOR_WRITE: AccessDecision = AccessDecision::allow(ACCESS_VECTOR_0010);
1087
    /// Test-only policy source whose access decisions are driven by a shared
    /// atomic policy value, allowing other threads to flip the policy mid-test.
    struct PolicyServer {
        // Shared with the test body and the policy-changing threads.
        policy: Arc<AtomicU32>,
    }
1091
1092    impl PolicyServer {
1093        fn set_policy(&self, policy: u32) {
1094            if policy > 2 {
1095                panic!("attempt to set policy to invalid value: {}", policy);
1096            }
1097            self.policy.as_ref().store(policy, Ordering::Relaxed);
1098        }
1099    }
1100
1101    impl Query for PolicyServer {
1102        fn compute_access_decision(
1103            &self,
1104            _source_sid: SecurityId,
1105            _target_sid: SecurityId,
1106            _target_class: ObjectClass,
1107        ) -> AccessDecision {
1108            let policy = self.policy.as_ref().load(Ordering::Relaxed);
1109            if policy == NO_RIGHTS {
1110                AccessDecision::default()
1111            } else if policy == READ_RIGHTS {
1112                ACCESS_VECTOR_READ
1113            } else if policy == WRITE_RIGHTS {
1114                ACCESS_VECTOR_WRITE
1115            } else {
1116                panic!("compute_access_decision found invalid policy: {}", policy);
1117            }
1118        }
1119
1120        fn compute_new_fs_node_sid(
1121            &self,
1122            _source_sid: SecurityId,
1123            _target_sid: SecurityId,
1124            _fs_node_class: FsNodeClass,
1125        ) -> Result<SecurityId, anyhow::Error> {
1126            unreachable!()
1127        }
1128
1129        fn compute_new_fs_node_sid_with_name(
1130            &self,
1131            _source_sid: SecurityId,
1132            _target_sid: SecurityId,
1133            _fs_node_class: FsNodeClass,
1134            _fs_node_name: NullessByteStr<'_>,
1135        ) -> Option<SecurityId> {
1136            unreachable!()
1137        }
1138
1139        fn compute_ioctl_access_decision(
1140            &self,
1141            _source_sid: SecurityId,
1142            _target_sid: SecurityId,
1143            _target_class: ObjectClass,
1144            _ioctl_prefix: u8,
1145        ) -> IoctlAccessDecision {
1146            todo!()
1147        }
1148    }
1149
    impl Reset for PolicyServer {
        /// The test server keeps no per-policy state of its own to clear;
        /// report success so wrapping caches proceed with their reset.
        fn reset(&self) -> bool {
            true
        }
    }
1155
    #[fuchsia::test]
    async fn thread_local_query_access_vector_cache_coherence() {
        // Repeat the racy scenario several times to improve the odds of actually
        // interleaving queries with the policy changes.
        for _ in 0..TEST_CAPACITY {
            test_thread_local_query_access_vector_cache_coherence().await
        }
    }
1162
1163    /// Tests cache coherence over two policy changes over a [`ThreadLocalQuery`].
1164    async fn test_thread_local_query_access_vector_cache_coherence() {
1165        let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
1166        let policy_server: Arc<PolicyServer> =
1167            Arc::new(PolicyServer { policy: active_policy.clone() });
1168        let cache_version = Arc::new(AtomicVersion::default());
1169
1170        let fixed_avc = FifoQueryCache::<_>::new(policy_server.clone(), TEST_CAPACITY);
1171        let cache_version_for_avc = cache_version.clone();
1172        let mut query_avc = ThreadLocalQuery::new(cache_version_for_avc, fixed_avc);
1173
1174        policy_server.set_policy(NO_RIGHTS);
1175        let (tx, rx) = futures::channel::oneshot::channel();
1176        let query_thread = spawn(move || {
1177            let mut trace = vec![];
1178
1179            for _ in 0..2000 {
1180                trace.push(query_avc.compute_access_decision(
1181                    A_TEST_SID.clone(),
1182                    A_TEST_SID.clone(),
1183                    KernelClass::Process.into(),
1184                ))
1185            }
1186
1187            tx.send(trace).expect("send trace");
1188        });
1189
1190        let policy_server = PolicyServer { policy: active_policy.clone() };
1191        let cache_version_for_read = cache_version.clone();
1192        let set_read_thread = spawn(move || {
1193            std::thread::sleep(std::time::Duration::from_micros(1));
1194            policy_server.set_policy(READ_RIGHTS);
1195            cache_version_for_read.reset();
1196        });
1197
1198        let policy_server = PolicyServer { policy: active_policy.clone() };
1199        let cache_version_for_write = cache_version;
1200        let set_write_thread = spawn(move || {
1201            std::thread::sleep(std::time::Duration::from_micros(2));
1202            policy_server.set_policy(WRITE_RIGHTS);
1203            cache_version_for_write.reset();
1204        });
1205
1206        set_read_thread.join().expect("join set-policy-to-read");
1207        set_write_thread.join().expect("join set-policy-to-write");
1208        query_thread.join().expect("join query");
1209        let trace = rx.await.expect("receive trace");
1210        let mut observed_rights: HashSet<AccessVector> = Default::default();
1211        let mut prev_rights = AccessVector::NONE;
1212        for (i, rights) in trace.into_iter().enumerate() {
1213            if i != 0 && rights.allow != prev_rights {
1214                // Return-to-previous-rights => cache incoherence!
1215                assert!(!observed_rights.contains(&rights.allow));
1216                observed_rights.insert(rights.allow);
1217            }
1218
1219            prev_rights = rights.allow;
1220        }
1221    }
1222
    #[fuchsia::test]
    async fn locked_fixed_access_vector_cache_coherence() {
        // Repeat the racy scenario to improve the odds of actually interleaving
        // queries with the policy changes.
        for _ in 0..10 {
            test_locked_fixed_access_vector_cache_coherence().await
        }
    }
1229
1230    /// Tests cache coherence over two policy changes over a `Locked<Fixed>`.
1231    async fn test_locked_fixed_access_vector_cache_coherence() {
1232        //
1233        // Test setup
1234        //
1235
1236        let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
1237        let policy_server = Arc::new(PolicyServer { policy: active_policy.clone() });
1238        let fixed_avc = FifoQueryCache::<_>::new(policy_server.clone(), TEST_CAPACITY);
1239        let avc = Locked::new(fixed_avc);
1240        let sids = unique_sids(30);
1241
1242        // Ensure the initial policy is `NO_RIGHTS`.
1243        policy_server.set_policy(NO_RIGHTS);
1244
1245        //
1246        // Test run: Two threads will query the AVC many times while two other threads make policy
1247        // changes.
1248        //
1249
1250        // Allow both query threads to synchronize on "last policy change has been made". Query
1251        // threads use this signal to ensure at least some of their queries occur after the last
1252        // policy change.
1253        let (tx_last_policy_change_1, rx_last_policy_change_1) =
1254            futures::channel::oneshot::channel();
1255        let (tx_last_policy_change_2, rx_last_policy_change_2) =
1256            futures::channel::oneshot::channel();
1257
1258        // Set up two querying threads. The number of iterations in each thread is highly likely
1259        // to perform queries that overlap with the two policy changes, but to be sure, use
1260        // `rx_last_policy_change_#` to synchronize  before last queries.
1261        let (tx1, rx1) = futures::channel::oneshot::channel();
1262        let avc_for_query_1 = avc.clone();
1263        let sids_for_query_1 = sids.clone();
1264
1265        let query_thread_1 = spawn(|| async move {
1266            let sids = sids_for_query_1;
1267            let mut trace = vec![];
1268
1269            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(2000) {
1270                trace.push((
1271                    sids[i].clone(),
1272                    avc_for_query_1.compute_access_decision(
1273                        sids[i].clone(),
1274                        A_TEST_SID.clone(),
1275                        KernelClass::Process.into(),
1276                    ),
1277                ))
1278            }
1279
1280            rx_last_policy_change_1.await.expect("receive last-policy-change signal (1)");
1281
1282            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(10) {
1283                trace.push((
1284                    sids[i].clone(),
1285                    avc_for_query_1.compute_access_decision(
1286                        sids[i].clone(),
1287                        A_TEST_SID.clone(),
1288                        KernelClass::Process.into(),
1289                    ),
1290                ))
1291            }
1292
1293            tx1.send(trace).expect("send trace 1");
1294
1295            //
1296            // Test expectations: After `<final-policy-reset>; avc.reset();
1297            // avc.compute_access_decision();`, all caches (including those that lazily reset on
1298            // next query) must contain *only* items consistent with the final policy: `(_, _, ) =>
1299            // WRITE`.
1300            //
1301
1302            for (_, result) in avc_for_query_1.delegate.lock().access_cache.iter() {
1303                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1304            }
1305        });
1306
1307        let (tx2, rx2) = futures::channel::oneshot::channel();
1308        let avc_for_query_2 = avc.clone();
1309        let sids_for_query_2 = sids.clone();
1310
1311        let query_thread_2 = spawn(|| async move {
1312            let sids = sids_for_query_2;
1313            let mut trace = vec![];
1314
1315            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(2000) {
1316                trace.push((
1317                    sids[i].clone(),
1318                    avc_for_query_2.compute_access_decision(
1319                        sids[i].clone(),
1320                        A_TEST_SID.clone(),
1321                        KernelClass::Process.into(),
1322                    ),
1323                ))
1324            }
1325
1326            rx_last_policy_change_2.await.expect("receive last-policy-change signal (2)");
1327
1328            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(10) {
1329                trace.push((
1330                    sids[i].clone(),
1331                    avc_for_query_2.compute_access_decision(
1332                        sids[i].clone(),
1333                        A_TEST_SID.clone(),
1334                        KernelClass::Process.into(),
1335                    ),
1336                ))
1337            }
1338
1339            tx2.send(trace).expect("send trace 2");
1340
1341            //
1342            // Test expectations: After `<final-policy-reset>; avc.reset();
1343            // avc.compute_access_decision();`, all caches (including those that lazily reset on
1344            // next query) must contain *only* items consistent with the final policy: `(_, _, ) =>
1345            // NONE`.
1346            //
1347
1348            for (_, result) in avc_for_query_2.delegate.lock().access_cache.iter() {
1349                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1350            }
1351        });
1352
1353        let policy_server_for_set_read = policy_server.clone();
1354        let avc_for_set_read = avc.clone();
1355        let (tx_set_read, rx_set_read) = futures::channel::oneshot::channel();
1356        let set_read_thread = spawn(move || {
1357            // Allow some queries to accumulate before first policy change.
1358            std::thread::sleep(std::time::Duration::from_micros(1));
1359
1360            // Set security server policy *first*, then reset caches. This is normally the
1361            // responsibility of the security server.
1362            policy_server_for_set_read.set_policy(READ_RIGHTS);
1363            avc_for_set_read.reset();
1364
1365            tx_set_read.send(true).expect("send set-read signal")
1366        });
1367
1368        let policy_server_for_set_write = policy_server.clone();
1369        let avc_for_set_write = avc;
1370        let set_write_thread = spawn(|| async move {
1371            // Complete set-read before executing set-write.
1372            rx_set_read.await.expect("receive set-write signal");
1373            std::thread::sleep(std::time::Duration::from_micros(1));
1374
1375            // Set security server policy *first*, then reset caches. This is normally the
1376            // responsibility of the security server.
1377            policy_server_for_set_write.set_policy(WRITE_RIGHTS);
1378            avc_for_set_write.reset();
1379
1380            tx_last_policy_change_1.send(true).expect("send last-policy-change signal (1)");
1381            tx_last_policy_change_2.send(true).expect("send last-policy-change signal (2)");
1382        });
1383
1384        // Join all threads.
1385        set_read_thread.join().expect("join set-policy-to-read");
1386        let _ = set_write_thread.join().expect("join set-policy-to-write").await;
1387        let _ = query_thread_1.join().expect("join query").await;
1388        let _ = query_thread_2.join().expect("join query").await;
1389
1390        // Receive traces from query threads.
1391        let trace_1 = rx1.await.expect("receive trace 1");
1392        let trace_2 = rx2.await.expect("receive trace 2");
1393
1394        //
1395        // Test expectations: Inspect individual query thread traces separately. For each thread,
1396        // group `(sid, 0, 0) -> AccessVector` trace items by `sid`, keeping them in chronological
1397        // order. Every such grouping should observe at most `NONE->READ`, `READ->WRITE`
1398        // transitions. Any other transitions suggests out-of-order "jitter" from stale cache items.
1399        //
1400        // We cannot expect stronger guarantees (e.g., across different queries). For example, the
1401        // following scheduling is possible:
1402        //
1403        // 1. Policy change thread changes policy from NONE to READ;
1404        // 2. Query thread qt queries q1, which as never been queried before. Result: READ.
1405        // 3. Query thread qt queries q0, which was cached before policy reload. Result: NONE.
1406        // 4. All caches reset.
1407        //
1408        // Notice that, ignoring query inputs, qt observes trace `..., READ, NONE`. However, such a
1409        // sequence must not occur when observing qt's trace filtered by query input (q1, q0, etc.).
1410        //
1411
1412        for trace in [trace_1, trace_2] {
1413            let mut trace_by_sid = HashMap::<SecurityId, Vec<AccessVector>>::new();
1414            for (sid, access_decision) in trace {
1415                trace_by_sid.entry(sid).or_insert(vec![]).push(access_decision.allow);
1416            }
1417            for access_vectors in trace_by_sid.values() {
1418                let initial_rights = AccessVector::NONE;
1419                let mut prev_rights = &initial_rights;
1420                for rights in access_vectors.iter() {
1421                    // Note: `WRITE > READ > NONE`.
1422                    assert!(rights >= prev_rights);
1423                    prev_rights = rights;
1424                }
1425            }
1426        }
1427    }
1428
    /// Test-only security server that owns a cache [`Manager`] and serves
    /// access decisions from a shared atomic policy value.
    struct SecurityServer {
        // Cache manager; wired back to this server via a `Weak` in test setup.
        manager: Manager<SecurityServer>,
        // Shared with the test body and the policy-changing threads.
        policy: Arc<AtomicU32>,
    }
1433
    impl SecurityServer {
        /// Returns the cache manager owned by this server.
        fn manager(&self) -> &Manager<SecurityServer> {
            &self.manager
        }
    }
1439
1440    impl Query for SecurityServer {
1441        fn compute_access_decision(
1442            &self,
1443            _source_sid: SecurityId,
1444            _target_sid: SecurityId,
1445            _target_class: ObjectClass,
1446        ) -> AccessDecision {
1447            let policy = self.policy.as_ref().load(Ordering::Relaxed);
1448            if policy == NO_RIGHTS {
1449                AccessDecision::default()
1450            } else if policy == READ_RIGHTS {
1451                ACCESS_VECTOR_READ
1452            } else if policy == WRITE_RIGHTS {
1453                ACCESS_VECTOR_WRITE
1454            } else {
1455                panic!("compute_access_decision found invalid policy: {}", policy);
1456            }
1457        }
1458
1459        fn compute_new_fs_node_sid(
1460            &self,
1461            _source_sid: SecurityId,
1462            _target_sid: SecurityId,
1463            _fs_node_class: FsNodeClass,
1464        ) -> Result<SecurityId, anyhow::Error> {
1465            unreachable!()
1466        }
1467
1468        fn compute_new_fs_node_sid_with_name(
1469            &self,
1470            _source_sid: SecurityId,
1471            _target_sid: SecurityId,
1472            _fs_node_class: FsNodeClass,
1473            _fs_node_name: NullessByteStr<'_>,
1474        ) -> Option<SecurityId> {
1475            unreachable!()
1476        }
1477
1478        fn compute_ioctl_access_decision(
1479            &self,
1480            _source_sid: SecurityId,
1481            _target_sid: SecurityId,
1482            _target_class: ObjectClass,
1483            _ioctl_prefix: u8,
1484        ) -> IoctlAccessDecision {
1485            todo!()
1486        }
1487    }
1488
    impl Reset for SecurityServer {
        /// The test server keeps no per-policy state of its own to clear;
        /// report success so wrapping caches proceed with their reset.
        fn reset(&self) -> bool {
            true
        }
    }
1494
    #[fuchsia::test]
    async fn manager_cache_coherence() {
        // Repeat the racy scenario to improve the odds of actually interleaving
        // queries with the policy changes.
        for _ in 0..10 {
            test_manager_cache_coherence().await
        }
    }
1501
    /// Tests cache coherence over two policy changes, querying through thread-local caches
    /// provided by a [`Manager`].
1503    async fn test_manager_cache_coherence() {
1504        //
1505        // Test setup
1506        //
1507
1508        let (active_policy, security_server) = {
1509            // Carefully initialize strong and weak references between security server and its cache
1510            // manager.
1511
1512            let manager = Manager::new();
1513
1514            // Initialize `security_server` to own `manager`.
1515            let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
1516            let security_server =
1517                Arc::new(SecurityServer { manager, policy: active_policy.clone() });
1518
            // Replace `security_server.manager`'s empty `Weak` with `Weak<security_server>` to
            // start serving `security_server`'s policy out of `security_server.manager`'s cache.
1521            security_server
1522                .as_ref()
1523                .manager()
1524                .set_security_server(Arc::downgrade(&security_server));
1525
1526            (active_policy, security_server)
1527        };
1528        let sids = unique_sids(30);
1529
1530        fn set_policy(owner: &Arc<AtomicU32>, policy: u32) {
1531            if policy > 2 {
1532                panic!("attempt to set policy to invalid value: {}", policy);
1533            }
1534            owner.as_ref().store(policy, Ordering::Relaxed);
1535        }
1536
1537        // Ensure the initial policy is `NO_RIGHTS`.
1538        set_policy(&active_policy, NO_RIGHTS);
1539
1540        //
1541        // Test run: Two threads will query the AVC many times while two other threads make policy
1542        // changes.
1543        //
1544
1545        // Allow both query threads to synchronize on "last policy change has been made". Query
1546        // threads use this signal to ensure at least some of their queries occur after the last
1547        // policy change.
1548        let (tx_last_policy_change_1, rx_last_policy_change_1) =
1549            futures::channel::oneshot::channel();
1550        let (tx_last_policy_change_2, rx_last_policy_change_2) =
1551            futures::channel::oneshot::channel();
1552
1553        // Set up two querying threads. The number of iterations in each thread is highly likely
1554        // to perform queries that overlap with the two policy changes, but to be sure, use
        // `rx_last_policy_change_#` to synchronize before the last queries.
1556        let (tx1, rx1) = futures::channel::oneshot::channel();
1557        let mut avc_for_query_1 = security_server.manager().new_thread_local_cache();
1558        let sids_for_query_1 = sids.clone();
1559
1560        let query_thread_1 = spawn(|| async move {
1561            let sids = sids_for_query_1;
1562            let mut trace = vec![];
1563
1564            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(2000) {
1565                trace.push((
1566                    sids[i].clone(),
1567                    avc_for_query_1.compute_access_decision(
1568                        sids[i].clone(),
1569                        A_TEST_SID.clone(),
1570                        KernelClass::Process.into(),
1571                    ),
1572                ))
1573            }
1574
1575            rx_last_policy_change_1.await.expect("receive last-policy-change signal (1)");
1576
1577            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(10) {
1578                trace.push((
1579                    sids[i].clone(),
1580                    avc_for_query_1.compute_access_decision(
1581                        sids[i].clone(),
1582                        A_TEST_SID.clone(),
1583                        KernelClass::Process.into(),
1584                    ),
1585                ))
1586            }
1587
1588            tx1.send(trace).expect("send trace 1");
1589
1590            //
1591            // Test expectations: After `<final-policy-reset>; avc.reset();
1592            // avc.compute_access_decision();`, all caches (including those that lazily reset on
1593            // next query) must contain *only* items consistent with the final policy: `(_, _, ) =>
1594            // WRITE`.
1595            //
1596
1597            for (_, result) in avc_for_query_1.delegate.access_cache.iter() {
1598                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1599            }
1600        });
1601
1602        let (tx2, rx2) = futures::channel::oneshot::channel();
1603        let mut avc_for_query_2 = security_server.manager().new_thread_local_cache();
1604        let sids_for_query_2 = sids.clone();
1605
1606        let query_thread_2 = spawn(|| async move {
1607            let sids = sids_for_query_2;
1608            let mut trace = vec![];
1609
1610            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(2000) {
1611                trace.push((
1612                    sids[i].clone(),
1613                    avc_for_query_2.compute_access_decision(
1614                        sids[i].clone(),
1615                        A_TEST_SID.clone(),
1616                        KernelClass::Process.into(),
1617                    ),
1618                ))
1619            }
1620
1621            rx_last_policy_change_2.await.expect("receive last-policy-change signal (2)");
1622
                // Sample 10 indices uniformly from `[10, 30)` (half-open, per `rand`'s
                // `Uniform::new`), so some SIDs may repeat across the pre- and post-reset loops.
1623            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(10) {
1624                trace.push((
1625                    sids[i].clone(),
1626                    avc_for_query_2.compute_access_decision(
1627                        sids[i].clone(),
1628                        A_TEST_SID.clone(),
1629                        KernelClass::Process.into(),
1630                    ),
1631                ))
1632            }
1633
1634            tx2.send(trace).expect("send trace 2");
1635
1636            //
1637            // Test expectations: After `<final-policy-reset>; avc.reset();
1638            // avc.compute_access_decision();`, all caches (including those that lazily reset on
1639            // next query) must contain *only* items consistent with the final policy: `(_, _, _) =>
1640            // WRITE`.
1641            //
1642
1643            for (_, result) in avc_for_query_2.delegate.access_cache.iter() {
1644                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1645            }
1646        });
1647
1648        // Set up two threads that will update the security policy *first*, then reset caches.
1649        // The threads synchronize to ensure a policy order of NONE->READ->WRITE.
1650        let active_policy_for_set_read = active_policy.clone();
1651        let security_server_for_set_read = security_server.clone();
1652        let (tx_set_read, rx_set_read) = futures::channel::oneshot::channel();
1653        let set_read_thread = spawn(move || {
1654            // Allow some queries to accumulate before first policy change.
1655            std::thread::sleep(std::time::Duration::from_micros(1));
1656
1657            // Set security server policy *first*, then reset caches. This is normally the
1658            // responsibility of the security server.
1659            set_policy(&active_policy_for_set_read, READ_RIGHTS);
1660            security_server_for_set_read.manager().reset();
1661
1662            tx_set_read.send(true).expect("send set-read signal")
1663        });
1664        let active_policy_for_set_write = active_policy.clone();
1665        let security_server_for_set_write = security_server.clone();
            // NOTE: this thread's closure returns a future; `join()` below yields that future,
            // which must then be `.await`ed for the body to actually run to completion.
1666        let set_write_thread = spawn(|| async move {
1667            // Complete set-read before executing set-write.
1668            rx_set_read.await.expect("receive set-read signal");
1669            std::thread::sleep(std::time::Duration::from_micros(1));
1670
1671            // Set security server policy *first*, then reset caches. This is normally the
1672            // responsibility of the security server.
1673            set_policy(&active_policy_for_set_write, WRITE_RIGHTS);
1674            security_server_for_set_write.manager().reset();
1675
1676            tx_last_policy_change_1.send(true).expect("send last-policy-change signal (1)");
1677            tx_last_policy_change_2.send(true).expect("send last-policy-change signal (2)");
1678        });
1679
1680        // Join all threads.
1681        set_read_thread.join().expect("join set-policy-to-read");
1682        let _ = set_write_thread.join().expect("join set-policy-to-write").await;
1683        let _ = query_thread_1.join().expect("join query").await;
1684        let _ = query_thread_2.join().expect("join query").await;
1685
1686        // Receive traces from query threads.
1687        let trace_1 = rx1.await.expect("receive trace 1");
1688        let trace_2 = rx2.await.expect("receive trace 2");
1689
1690        //
1691        // Test expectations: Inspect individual query thread traces separately. For each thread,
1692        // group `(sid, 0, 0) -> AccessVector` trace items by `sid`, keeping them in chronological
1693        // order. Every such grouping should observe at most `NONE->READ`, `READ->WRITE`
1694        // transitions. Any other transitions suggest out-of-order "jitter" from stale cache items.
1695        //
1696        // We cannot expect stronger guarantees (e.g., across different queries). For example, the
1697        // following scheduling is possible:
1698        //
1699        // 1. Policy change thread changes policy from NONE to READ;
1700        // 2. Query thread qt queries q1, which has never been queried before. Result: READ.
1701        // 3. Query thread qt queries q0, which was cached before policy reload. Result: NONE.
1702        // 4. All caches reset.
1703        //
1704        // Notice that, ignoring query inputs, qt observes `..., READ, NONE`. However, such a
1705        // sequence must not occur when observing qt's trace filtered by query input (q1, q0, etc.).
1706        //
1707        // Finally, the shared (`Locked`) cache should contain only entries consistent with
1708        // the final policy: `(_, _, _) => WRITE`.
1709        //
1710
1711        for trace in [trace_1, trace_2] {
                // Group each thread's observations by SID, preserving chronological order
                // within each group.
1712            let mut trace_by_sid = HashMap::<SecurityId, Vec<AccessVector>>::new();
1713            for (sid, access_decision) in trace {
1714                trace_by_sid.entry(sid).or_insert(vec![]).push(access_decision.allow);
1715            }
1716            for access_vectors in trace_by_sid.values() {
1717                let initial_rights = AccessVector::NONE;
1718                let mut prev_rights = &initial_rights;
1719                for rights in access_vectors.iter() {
1720                    // Note: `WRITE > READ > NONE`, so per-SID rights must be non-decreasing.
1721                    assert!(rights >= prev_rights);
1722                    prev_rights = rights;
1723                }
1724            }
1725        }
1726
                // The shared cache must hold only entries computed against the final (WRITE) policy.
1727        let shared_cache = security_server.manager().shared_cache.delegate.lock();
1728        for (_, result) in shared_cache.access_cache.iter() {
1729            assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
1730        }
1731    }
1732}