//! Caching layers for security policy queries: a fixed-capacity FIFO access
//! vector cache, a mutex-guarded wrapper for sharing one cache across threads,
//! and thread-local caches invalidated via a shared version counter.

use crate::fifo_cache::FifoCache;
use crate::policy::{AccessDecision, IoctlAccessDecision};
use crate::sync::Mutex;
use crate::{FsNodeClass, KernelClass, NullessByteStr, ObjectClass, SecurityId};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};

pub use crate::fifo_cache::{CacheStats, HasCacheStats};

/// Interface for computing access decisions and new security IDs via an
/// immutable (`&self`) reference, e.g. a policy server or a lock-guarded
/// shared cache.
pub(super) trait Query {
    /// Computes the [`AccessDecision`] for `source_sid` acting on `target_sid`
    /// as an object of type `target_class`.
    fn compute_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision;

    /// Computes the [`SecurityId`] to assign to a newly created file-system
    /// node of class `fs_node_class`.
    fn compute_new_fs_node_sid(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error>;

    /// Variant of `compute_new_fs_node_sid` that also takes the new node's
    /// name into account.
    fn compute_new_fs_node_sid_with_name(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId>;

    /// Computes the ioctl extended-permissions decision for `source_sid`
    /// acting on `target_sid`, for ioctls with the given prefix byte.
    fn compute_ioctl_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision;
}

/// Mutable-reference counterpart to [`Query`]; the blanket impl below provides
/// it for every [`Query`] implementation, while stateful caches implement it
/// directly.
pub trait QueryMut {
    fn compute_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision;

    fn compute_new_fs_node_sid(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error>;

    fn compute_new_fs_node_sid_with_name(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId>;

    fn compute_ioctl_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision;
}

impl<Q: Query> QueryMut for Q {
    fn compute_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision {
        (self as &dyn Query).compute_access_decision(source_sid, target_sid, target_class)
    }

    fn compute_new_fs_node_sid(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        (self as &dyn Query).compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
    }

    fn compute_new_fs_node_sid_with_name(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        (self as &dyn Query).compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    fn compute_ioctl_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        (self as &dyn Query).compute_ioctl_access_decision(
            source_sid,
            target_sid,
            target_class,
            ioctl_prefix,
        )
    }
}

/// Interface through which cached state can be cleared via an immutable
/// (`&self`) reference, e.g. in response to a policy reload.
pub(super) trait Reset {
    /// Clears cached state, returning false if the implementation is defunct
    /// (e.g. a [`Weak`] delegate that can no longer be upgraded).
    fn reset(&self) -> bool;
}

/// Mutable-reference counterpart to [`Reset`].
pub(super) trait ResetMut {
    fn reset(&mut self) -> bool;
}

impl<R: Reset> ResetMut for R {
    fn reset(&mut self) -> bool {
        (self as &dyn Reset).reset()
    }
}

/// Implemented by caching layers whose delegate can be swapped out after
/// construction; `set_delegate` returns the previous delegate.
pub(super) trait ProxyMut<D> {
    fn set_delegate(&mut self, delegate: D) -> D;
}
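
// The blanket impls above mean a delegate only needs to implement the immutable
// `Query`/`Reset` traits to be usable wherever the mutable `QueryMut`/`ResetMut`
// interfaces are required. A minimal sketch (the `DenyAllDelegate` type is
// hypothetical, not part of this crate):
//
//     struct DenyAllDelegate;
//
//     impl Query for DenyAllDelegate {
//         fn compute_access_decision(
//             &self,
//             _source_sid: SecurityId,
//             _target_sid: SecurityId,
//             _target_class: ObjectClass,
//         ) -> AccessDecision {
//             AccessDecision::default()
//         }
//         // ... remaining methods elided ...
//     }
//
//     // `DenyAllDelegate` now also satisfies `QueryMut`, so it can back a
//     // `FifoQueryCache` directly.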

/// Cache key for access-decision queries.
#[derive(Clone, Hash, PartialEq, Eq)]
struct AccessQueryArgs {
    source_sid: SecurityId,
    target_sid: SecurityId,
    target_class: ObjectClass,
}

/// Cached result of an access-decision query; the new-file SID is filled in
/// lazily for the same (source, target, class) triple.
#[derive(Clone)]
struct AccessQueryResult {
    access_decision: AccessDecision,
    new_file_sid: Option<SecurityId>,
}

/// Cache key for ioctl extended-permission queries.
#[derive(Clone, Hash, PartialEq, Eq)]
struct IoctlAccessQueryArgs {
    source_sid: SecurityId,
    target_sid: SecurityId,
    target_class: ObjectClass,
    ioctl_prefix: u8,
}

/// A fixed-capacity FIFO cache of access-decision and ioctl-access queries,
/// delegating cache misses to `D`.
pub(super) struct FifoQueryCache<D> {
    access_cache: FifoCache<AccessQueryArgs, AccessQueryResult>,
    ioctl_access_cache: FifoCache<IoctlAccessQueryArgs, IoctlAccessDecision>,
    delegate: D,
}

impl<D> FifoQueryCache<D> {
    /// Capacity of the ioctl-access partition, as a fraction of the
    /// access-decision partition's `capacity`.
    const IOCTL_CAPACITY_MULTIPLIER: f32 = 0.25;

    pub fn new(delegate: D, capacity: usize) -> Self {
        assert!(capacity > 0, "cannot instantiate fixed access vector cache of size 0");
        let ioctl_access_cache_capacity =
            (Self::IOCTL_CAPACITY_MULTIPLIER * (capacity as f32)) as usize;
        assert!(
            ioctl_access_cache_capacity > 0,
            "cannot instantiate ioctl cache partition of size 0"
        );

        Self {
            access_cache: FifoCache::with_capacity(capacity),
            ioctl_access_cache: FifoCache::with_capacity(ioctl_access_cache_capacity),
            delegate,
        }
    }

    #[cfg(test)]
    fn access_cache_is_full(&self) -> bool {
        self.access_cache.is_full()
    }

    #[cfg(test)]
    fn ioctl_access_cache_is_full(&self) -> bool {
        self.ioctl_access_cache.is_full()
    }
}
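
// A quick illustration of the capacity split above: `FifoQueryCache::new(d, 1000)`
// produces an access-decision partition of 1000 entries and an ioctl partition of
// (0.25 * 1000) = 250 entries, while `FifoQueryCache::new(d, 3)` would panic,
// since (0.25 * 3) truncates to an ioctl partition of 0.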

impl<D: QueryMut> QueryMut for FifoQueryCache<D> {
    fn compute_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision {
        let query_args =
            AccessQueryArgs { source_sid, target_sid, target_class: target_class.clone() };
        if let Some(result) = self.access_cache.get(&query_args) {
            return result.access_decision.clone();
        }

        let access_decision =
            self.delegate.compute_access_decision(source_sid, target_sid, target_class);

        self.access_cache.insert(
            query_args,
            AccessQueryResult { access_decision: access_decision.clone(), new_file_sid: None },
        );

        access_decision
    }

    fn compute_new_fs_node_sid(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        let target_class = ObjectClass::Kernel(KernelClass::from(fs_node_class));

        let query_args =
            AccessQueryArgs { source_sid, target_sid, target_class: target_class.clone() };
        let query_result = if let Some(result) = self.access_cache.get(&query_args) {
            result
        } else {
            let access_decision =
                self.delegate.compute_access_decision(source_sid, target_sid, target_class);
            self.access_cache
                .insert(query_args, AccessQueryResult { access_decision, new_file_sid: None })
        };

        // The new-file SID is computed lazily: if the cached entry for this
        // (source, target, class) triple has no SID yet, ask the delegate and
        // memoize the answer.
        if let Some(new_file_sid) = query_result.new_file_sid {
            Ok(new_file_sid)
        } else {
            let new_file_sid =
                self.delegate.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class);
            if let Ok(new_file_sid) = new_file_sid {
                query_result.new_file_sid = Some(new_file_sid);
            }
            new_file_sid
        }
    }

    fn compute_new_fs_node_sid_with_name(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        // Name-aware SID computation is not cached; always defer to the delegate.
        self.delegate.compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    fn compute_ioctl_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        let query_args = IoctlAccessQueryArgs {
            source_sid,
            target_sid,
            target_class: target_class.clone(),
            ioctl_prefix,
        };
        if let Some(result) = self.ioctl_access_cache.get(&query_args) {
            return result.clone();
        }

        let ioctl_access_decision = self.delegate.compute_ioctl_access_decision(
            source_sid,
            target_sid,
            target_class,
            ioctl_prefix,
        );

        self.ioctl_access_cache.insert(query_args, ioctl_access_decision.clone());

        ioctl_access_decision
    }
}

impl<D> HasCacheStats for FifoQueryCache<D> {
    fn cache_stats(&self) -> CacheStats {
        &self.access_cache.cache_stats() + &self.ioctl_access_cache.cache_stats()
    }
}

impl<D> ResetMut for FifoQueryCache<D> {
    fn reset(&mut self) -> bool {
        self.access_cache = FifoCache::with_capacity(self.access_cache.capacity());
        self.ioctl_access_cache = FifoCache::with_capacity(self.ioctl_access_cache.capacity());
        true
    }
}

impl<D> ProxyMut<D> for FifoQueryCache<D> {
    fn set_delegate(&mut self, mut delegate: D) -> D {
        std::mem::swap(&mut self.delegate, &mut delegate);
        delegate
    }
}

/// Shares a mutable-interface delegate behind an `Arc<Mutex<...>>`, exposing
/// the immutable [`Query`]/[`Reset`] interfaces; clones share the same
/// underlying delegate.
pub(super) struct Locked<D> {
    delegate: Arc<Mutex<D>>,
}

impl<D> Clone for Locked<D> {
    fn clone(&self) -> Self {
        Self { delegate: self.delegate.clone() }
    }
}

impl<D> Locked<D> {
    pub fn new(delegate: D) -> Self {
        Self { delegate: Arc::new(Mutex::new(delegate)) }
    }
}

impl<D: QueryMut> Query for Locked<D> {
    fn compute_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision {
        self.delegate.lock().compute_access_decision(source_sid, target_sid, target_class)
    }

    fn compute_new_fs_node_sid(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        self.delegate.lock().compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
    }

    fn compute_new_fs_node_sid_with_name(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        self.delegate.lock().compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    fn compute_ioctl_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        self.delegate.lock().compute_ioctl_access_decision(
            source_sid,
            target_sid,
            target_class,
            ioctl_prefix,
        )
    }
}

impl<D: HasCacheStats> HasCacheStats for Locked<D> {
    fn cache_stats(&self) -> CacheStats {
        self.delegate.lock().cache_stats()
    }
}

impl<D: ResetMut> Reset for Locked<D> {
    fn reset(&self) -> bool {
        self.delegate.lock().reset()
    }
}

impl<D> Locked<D> {
    pub fn set_stateful_cache_delegate<PD>(&self, delegate: PD) -> PD
    where
        D: ProxyMut<PD>,
    {
        self.delegate.lock().set_delegate(delegate)
    }
}
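
// A minimal sharing sketch, assuming some delegate `d` that implements `QueryMut`
// (the variable names here are illustrative only): cloning a `Locked` clones the
// inner `Arc`, so every clone resolves queries against the same mutex-guarded cache.
//
//     let shared = Locked::new(FifoQueryCache::new(d, 1000));
//     let for_other_thread = shared.clone();
//     // Both handles hit the same underlying cache:
//     let decision = shared.compute_access_decision(source_sid, target_sid, class);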

/// Monotonically increasing version counter, used to signal to thread-local
/// caches that their contents are stale (e.g. because the policy changed).
#[derive(Default)]
pub struct AtomicVersion(AtomicU64);

impl AtomicVersion {
    /// Atomically loads the current version.
    pub fn version(&self) -> u64 {
        self.0.load(Ordering::Relaxed)
    }

    /// Atomically advances to the next version.
    pub fn increment_version(&self) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }
}

impl Reset for AtomicVersion {
    fn reset(&self) -> bool {
        self.increment_version();
        true
    }
}
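
// `AtomicVersion` implements `Reset` by bumping the counter rather than clearing
// anything itself; consumers such as `ThreadLocalQuery` (below) compare the shared
// counter against a locally recorded version and reset their own delegate lazily,
// on the next query:
//
//     let version = self.active_version.as_ref().version();
//     if self.current_version != version {
//         self.current_version = version;
//         self.delegate.reset();
//     }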

impl<Q: Query> Query for Arc<Q> {
    fn compute_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision {
        self.as_ref().compute_access_decision(source_sid, target_sid, target_class)
    }

    fn compute_new_fs_node_sid(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        self.as_ref().compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
    }

    fn compute_new_fs_node_sid_with_name(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        self.as_ref().compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    fn compute_ioctl_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        self.as_ref().compute_ioctl_access_decision(
            source_sid,
            target_sid,
            target_class,
            ioctl_prefix,
        )
    }
}

impl<R: Reset> Reset for Arc<R> {
    fn reset(&self) -> bool {
        self.as_ref().reset()
    }
}

// When the weak delegate can no longer be upgraded, queries fail closed: an empty
// (default) access decision, deny-all ioctl permissions, or an error/`None` for
// SID computation.
impl<Q: Query> Query for Weak<Q> {
    fn compute_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision {
        self.upgrade()
            .map(|q| q.compute_access_decision(source_sid, target_sid, target_class))
            .unwrap_or_default()
    }

    fn compute_new_fs_node_sid(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        self.upgrade()
            .map(|q| q.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class))
            .unwrap_or(Err(anyhow::anyhow!("weak reference failed to resolve")))
    }

    fn compute_new_fs_node_sid_with_name(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        let delegate = self.upgrade()?;
        delegate.compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    fn compute_ioctl_access_decision(
        &self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        self.upgrade()
            .map(|q| {
                q.compute_ioctl_access_decision(source_sid, target_sid, target_class, ioctl_prefix)
            })
            .unwrap_or(IoctlAccessDecision::DENY_ALL)
    }
}

impl<R: Reset> Reset for Weak<R> {
    fn reset(&self) -> bool {
        self.upgrade().as_deref().map(Reset::reset).unwrap_or(false)
    }
}

/// A per-thread caching layer that checks a shared [`AtomicVersion`] before
/// serving queries, resetting its delegate whenever the observed version has
/// changed since the last query.
pub(super) struct ThreadLocalQuery<D> {
    delegate: D,
    current_version: u64,
    active_version: Arc<AtomicVersion>,
}

impl<D> ThreadLocalQuery<D> {
    pub fn new(active_version: Arc<AtomicVersion>, delegate: D) -> Self {
        Self { delegate, current_version: Default::default(), active_version }
    }
}

impl<D: QueryMut + ResetMut> QueryMut for ThreadLocalQuery<D> {
    fn compute_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
    ) -> AccessDecision {
        let version = self.active_version.as_ref().version();
        if self.current_version != version {
            self.current_version = version;
            self.delegate.reset();
        }

        self.delegate.compute_access_decision(source_sid, target_sid, target_class)
    }

    fn compute_new_fs_node_sid(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
    ) -> Result<SecurityId, anyhow::Error> {
        let version = self.active_version.as_ref().version();
        if self.current_version != version {
            self.current_version = version;
            self.delegate.reset();
        }

        self.delegate.compute_new_fs_node_sid(source_sid, target_sid, fs_node_class)
    }

    fn compute_new_fs_node_sid_with_name(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        fs_node_class: FsNodeClass,
        fs_node_name: NullessByteStr<'_>,
    ) -> Option<SecurityId> {
        self.delegate.compute_new_fs_node_sid_with_name(
            source_sid,
            target_sid,
            fs_node_class,
            fs_node_name,
        )
    }

    fn compute_ioctl_access_decision(
        &mut self,
        source_sid: SecurityId,
        target_sid: SecurityId,
        target_class: ObjectClass,
        ioctl_prefix: u8,
    ) -> IoctlAccessDecision {
        self.delegate.compute_ioctl_access_decision(
            source_sid,
            target_sid,
            target_class,
            ioctl_prefix,
        )
    }
}
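
// Note that only `compute_access_decision` and `compute_new_fs_node_sid` perform
// the version check above; the name-based and ioctl queries pass straight through
// to the delegate without consulting `active_version`.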

/// Default capacity of the shared (cross-thread) cache.
const DEFAULT_SHARED_SIZE: usize = 1000;

/// Default capacity of each thread-local cache.
const DEFAULT_THREAD_LOCAL_SIZE: usize = 10;

/// Owns the shared cache and the version counter used to invalidate
/// thread-local caches, and constructs new thread-local caches layered over
/// the shared cache.
pub(super) struct Manager<SS> {
    shared_cache: Locked<FifoQueryCache<Weak<SS>>>,
    thread_local_version: Arc<AtomicVersion>,
}

impl<SS> Manager<SS> {
    pub fn new() -> Self {
        Self {
            shared_cache: Locked::new(FifoQueryCache::new(Weak::<SS>::new(), DEFAULT_SHARED_SIZE)),
            thread_local_version: Arc::new(AtomicVersion::default()),
        }
    }

    pub fn set_security_server(&self, security_server: Weak<SS>) -> Weak<SS> {
        self.shared_cache.set_stateful_cache_delegate(security_server)
    }

    pub fn get_shared_cache(&self) -> &Locked<FifoQueryCache<Weak<SS>>> {
        &self.shared_cache
    }

    pub fn new_thread_local_cache(
        &self,
    ) -> ThreadLocalQuery<FifoQueryCache<Locked<FifoQueryCache<Weak<SS>>>>> {
        ThreadLocalQuery::new(
            self.thread_local_version.clone(),
            FifoQueryCache::new(self.shared_cache.clone(), DEFAULT_THREAD_LOCAL_SIZE),
        )
    }
}
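
// A minimal wiring sketch, following the pattern used in the tests below; the
// `SecurityServer` type stands in for whichever policy-server type implements
// `Query`, and names other than the `Manager` API itself are illustrative:
//
//     let manager = Manager::new();
//     let server = Arc::new(SecurityServer { manager, /* ... */ });
//     // Point the shared cache at the policy server without creating a
//     // reference cycle.
//     server.manager().set_security_server(Arc::downgrade(&server));
//
//     // Each worker thread layers its own small cache over the shared one.
//     let mut avc = server.manager().new_thread_local_cache();
//     let decision = avc.compute_access_decision(source_sid, target_sid, class);
//
//     // On policy reload, invalidate everything: the shared cache eagerly,
//     // thread-local caches lazily via the version counter.
//     server.manager().reset();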

impl<SS> Reset for Manager<SS> {
    fn reset(&self) -> bool {
        self.shared_cache.reset();
        self.thread_local_version.reset();
        true
    }
}
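
// The two calls above implement the eager/lazy split: `shared_cache.reset()`
// clears the shared partition immediately, while bumping `thread_local_version`
// only marks per-thread caches stale until their next query.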

#[cfg(test)]
mod testing {
    use crate::SecurityId;

    use std::num::NonZeroU32;
    use std::sync::atomic::{AtomicU32, Ordering};
    use std::sync::LazyLock;

    pub(super) static A_TEST_SID: LazyLock<SecurityId> = LazyLock::new(unique_sid);

    pub(super) const TEST_CAPACITY: usize = 10;

    pub(super) fn unique_sid() -> SecurityId {
        static NEXT_ID: AtomicU32 = AtomicU32::new(1000);
        SecurityId(NonZeroU32::new(NEXT_ID.fetch_add(1, Ordering::AcqRel)).unwrap())
    }

    pub(super) fn unique_sids(count: usize) -> Vec<SecurityId> {
        (0..count).map(|_| unique_sid()).collect()
    }
}

#[cfg(test)]
mod tests {
    use super::testing::*;
    use super::*;
    use crate::policy::{AccessVector, XpermsBitmap};
    use crate::KernelClass;

    use std::sync::atomic::AtomicUsize;

    /// Delegate that counts queries and resets; it allows all regular and
    /// ioctl access, and is never expected to receive file-labelling queries.
    #[derive(Default)]
    struct TestDelegate {
        query_count: AtomicUsize,
        reset_count: AtomicUsize,
    }

    impl TestDelegate {
        fn query_count(&self) -> usize {
            self.query_count.load(Ordering::Relaxed)
        }

        fn reset_count(&self) -> usize {
            self.reset_count.load(Ordering::Relaxed)
        }
    }

    impl Query for TestDelegate {
        fn compute_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
        ) -> AccessDecision {
            self.query_count.fetch_add(1, Ordering::Relaxed);
            AccessDecision::allow(AccessVector::ALL)
        }

        fn compute_new_fs_node_sid(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
        ) -> Result<SecurityId, anyhow::Error> {
            unreachable!()
        }

        fn compute_new_fs_node_sid_with_name(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
            _fs_node_name: NullessByteStr<'_>,
        ) -> Option<SecurityId> {
            unreachable!()
        }

        fn compute_ioctl_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
            _ioctl_prefix: u8,
        ) -> IoctlAccessDecision {
            self.query_count.fetch_add(1, Ordering::Relaxed);
            IoctlAccessDecision::ALLOW_ALL
        }
    }

    impl Reset for TestDelegate {
        fn reset(&self) -> bool {
            self.reset_count.fetch_add(1, Ordering::Relaxed);
            true
        }
    }

    #[test]
    fn fixed_access_vector_cache_add_entry() {
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);
        assert_eq!(0, avc.delegate.query_count());
        assert_eq!(
            AccessVector::ALL,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(
            AccessVector::ALL,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(false, avc.access_cache_is_full());
    }

    #[test]
    fn fixed_access_vector_cache_reset() {
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());

        assert_eq!(0, avc.delegate.query_count());
        assert_eq!(
            AccessVector::ALL,
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into()
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(false, avc.access_cache_is_full());

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());
    }

    #[test]
    fn fixed_access_vector_cache_fill() {
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);

        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(sid, A_TEST_SID.clone(), KernelClass::Process.into());
        }
        assert_eq!(true, avc.access_cache_is_full());

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());

        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(A_TEST_SID.clone(), sid, KernelClass::Process.into());
        }
        assert_eq!(true, avc.access_cache_is_full());

        avc.reset();
        assert_eq!(false, avc.access_cache_is_full());
    }

    #[test]
    fn fixed_access_vector_cache_full_miss() {
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);

        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            KernelClass::Process.into(),
        );
        assert!(!avc.access_cache_is_full());

        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(sid, A_TEST_SID.clone(), KernelClass::Process.into());
        }
        assert!(avc.access_cache_is_full());

        let delegate_query_count = avc.delegate.query_count();
        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            KernelClass::Process.into(),
        );
        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());

        for sid in unique_sids(avc.access_cache.capacity()) {
            avc.compute_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into(),
            );
            avc.compute_access_decision(sid, A_TEST_SID.clone(), KernelClass::Process.into());
        }

        let delegate_query_count = avc.delegate.query_count();
        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            KernelClass::Process.into(),
        );
        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());
    }

    #[test]
    fn thread_local_query_access_vector_cache_reset() {
        let cache_version = Arc::new(AtomicVersion::default());
        let mut avc = ThreadLocalQuery::new(cache_version.clone(), TestDelegate::default());

        assert_eq!(0, avc.delegate.reset_count());
        cache_version.reset();
        assert_eq!(0, avc.delegate.reset_count());
        avc.compute_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            KernelClass::Process.into(),
        );
        assert_eq!(1, avc.delegate.reset_count());
    }

    #[test]
    fn access_vector_cache_ioctl_hit() {
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);
        assert_eq!(0, avc.delegate.query_count());
        assert_eq!(
            XpermsBitmap::ALL,
            avc.compute_ioctl_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into(),
                0x0,
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
        assert_eq!(
            XpermsBitmap::ALL,
            avc.compute_ioctl_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into(),
                0x0
            )
            .allow
        );
        assert_eq!(1, avc.delegate.query_count());
    }

    #[test]
    fn access_vector_cache_ioctl_miss() {
        let mut avc = FifoQueryCache::<_>::new(TestDelegate::default(), TEST_CAPACITY);

        avc.compute_ioctl_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            KernelClass::Process.into(),
            0x0,
        );

        for ioctl_prefix in 0x1..(1 + avc.ioctl_access_cache.capacity())
            .try_into()
            .expect("assumed that test ioctl cache capacity was < 255")
        {
            avc.compute_ioctl_access_decision(
                A_TEST_SID.clone(),
                A_TEST_SID.clone(),
                KernelClass::Process.into(),
                ioctl_prefix,
            );
        }
        assert!(avc.delegate.query_count() > 1);
        assert!(avc.ioctl_access_cache_is_full());
        let delegate_query_count = avc.delegate.query_count();

        avc.compute_ioctl_access_decision(
            A_TEST_SID.clone(),
            A_TEST_SID.clone(),
            KernelClass::Process.into(),
            0x0,
        );
        assert_eq!(delegate_query_count + 1, avc.delegate.query_count());
    }
}

#[cfg(test)]
#[cfg(feature = "selinux_starnix")]
mod starnix_tests {
    use super::testing::*;
    use super::*;
    use crate::policy::testing::{ACCESS_VECTOR_0001, ACCESS_VECTOR_0010};
    use crate::policy::AccessVector;
    use crate::KernelClass;

    use rand::distributions::Uniform;
    use rand::{thread_rng, Rng as _};
    use std::collections::{HashMap, HashSet};
    use std::sync::atomic::AtomicU32;
    use std::thread::spawn;

    const NO_RIGHTS: u32 = 0;
    const READ_RIGHTS: u32 = 1;
    const WRITE_RIGHTS: u32 = 2;

    const ACCESS_VECTOR_READ: AccessDecision = AccessDecision::allow(ACCESS_VECTOR_0001);
    const ACCESS_VECTOR_WRITE: AccessDecision = AccessDecision::allow(ACCESS_VECTOR_0010);

    struct PolicyServer {
        policy: Arc<AtomicU32>,
    }

    impl PolicyServer {
        fn set_policy(&self, policy: u32) {
            if policy > 2 {
                panic!("attempt to set policy to invalid value: {}", policy);
            }
            self.policy.as_ref().store(policy, Ordering::Relaxed);
        }
    }

    impl Query for PolicyServer {
        fn compute_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
        ) -> AccessDecision {
            let policy = self.policy.as_ref().load(Ordering::Relaxed);
            if policy == NO_RIGHTS {
                AccessDecision::default()
            } else if policy == READ_RIGHTS {
                ACCESS_VECTOR_READ
            } else if policy == WRITE_RIGHTS {
                ACCESS_VECTOR_WRITE
            } else {
                panic!("compute_access_decision found invalid policy: {}", policy);
            }
        }

        fn compute_new_fs_node_sid(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
        ) -> Result<SecurityId, anyhow::Error> {
            unreachable!()
        }

        fn compute_new_fs_node_sid_with_name(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
            _fs_node_name: NullessByteStr<'_>,
        ) -> Option<SecurityId> {
            unreachable!()
        }

        fn compute_ioctl_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
            _ioctl_prefix: u8,
        ) -> IoctlAccessDecision {
            todo!()
        }
    }

    impl Reset for PolicyServer {
        fn reset(&self) -> bool {
            true
        }
    }

    #[fuchsia::test]
    async fn thread_local_query_access_vector_cache_coherence() {
        for _ in 0..TEST_CAPACITY {
            test_thread_local_query_access_vector_cache_coherence().await
        }
    }

    async fn test_thread_local_query_access_vector_cache_coherence() {
        let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
        let policy_server: Arc<PolicyServer> =
            Arc::new(PolicyServer { policy: active_policy.clone() });
        let cache_version = Arc::new(AtomicVersion::default());

        let fixed_avc = FifoQueryCache::<_>::new(policy_server.clone(), TEST_CAPACITY);
        let cache_version_for_avc = cache_version.clone();
        let mut query_avc = ThreadLocalQuery::new(cache_version_for_avc, fixed_avc);

        policy_server.set_policy(NO_RIGHTS);
        let (tx, rx) = futures::channel::oneshot::channel();
        let query_thread = spawn(move || {
            let mut trace = vec![];

            for _ in 0..2000 {
                trace.push(query_avc.compute_access_decision(
                    A_TEST_SID.clone(),
                    A_TEST_SID.clone(),
                    KernelClass::Process.into(),
                ))
            }

            tx.send(trace).expect("send trace");
        });

        let policy_server = PolicyServer { policy: active_policy.clone() };
        let cache_version_for_read = cache_version.clone();
        let set_read_thread = spawn(move || {
            std::thread::sleep(std::time::Duration::from_micros(1));
            policy_server.set_policy(READ_RIGHTS);
            cache_version_for_read.reset();
        });

        let policy_server = PolicyServer { policy: active_policy.clone() };
        let cache_version_for_write = cache_version;
        let set_write_thread = spawn(move || {
            std::thread::sleep(std::time::Duration::from_micros(2));
            policy_server.set_policy(WRITE_RIGHTS);
            cache_version_for_write.reset();
        });

        set_read_thread.join().expect("join set-policy-to-read");
        set_write_thread.join().expect("join set-policy-to-write");
        query_thread.join().expect("join query");
        let trace = rx.await.expect("receive trace");
        let mut observed_rights: HashSet<AccessVector> = Default::default();
        let mut prev_rights = AccessVector::NONE;
        for (i, rights) in trace.into_iter().enumerate() {
            if i != 0 && rights.allow != prev_rights {
                // Each change in observed rights must be to a value not seen before.
                assert!(!observed_rights.contains(&rights.allow));
                observed_rights.insert(rights.allow);
            }

            prev_rights = rights.allow;
        }
    }

    #[fuchsia::test]
    async fn locked_fixed_access_vector_cache_coherence() {
        for _ in 0..10 {
            test_locked_fixed_access_vector_cache_coherence().await
        }
    }

    async fn test_locked_fixed_access_vector_cache_coherence() {
        let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
        let policy_server = Arc::new(PolicyServer { policy: active_policy.clone() });
        let fixed_avc = FifoQueryCache::<_>::new(policy_server.clone(), TEST_CAPACITY);
        let avc = Locked::new(fixed_avc);
        let sids = unique_sids(30);

        policy_server.set_policy(NO_RIGHTS);

        let (tx_last_policy_change_1, rx_last_policy_change_1) =
            futures::channel::oneshot::channel();
        let (tx_last_policy_change_2, rx_last_policy_change_2) =
            futures::channel::oneshot::channel();

        let (tx1, rx1) = futures::channel::oneshot::channel();
        let avc_for_query_1 = avc.clone();
        let sids_for_query_1 = sids.clone();

        let query_thread_1 = spawn(|| async move {
            let sids = sids_for_query_1;
            let mut trace = vec![];

            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(2000) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_1.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            rx_last_policy_change_1.await.expect("receive last-policy-change signal (1)");

            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(10) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_1.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            tx1.send(trace).expect("send trace 1");

            // After the final policy change, every cached decision must reflect the
            // write policy.
            for (_, result) in avc_for_query_1.delegate.lock().access_cache.iter() {
                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
            }
        });

        let (tx2, rx2) = futures::channel::oneshot::channel();
        let avc_for_query_2 = avc.clone();
        let sids_for_query_2 = sids.clone();

        let query_thread_2 = spawn(|| async move {
            let sids = sids_for_query_2;
            let mut trace = vec![];

            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(2000) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_2.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            rx_last_policy_change_2.await.expect("receive last-policy-change signal (2)");

            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(10) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_2.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            tx2.send(trace).expect("send trace 2");

            for (_, result) in avc_for_query_2.delegate.lock().access_cache.iter() {
                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
            }
        });

        let policy_server_for_set_read = policy_server.clone();
        let avc_for_set_read = avc.clone();
        let (tx_set_read, rx_set_read) = futures::channel::oneshot::channel();
        let set_read_thread = spawn(move || {
            std::thread::sleep(std::time::Duration::from_micros(1));

            policy_server_for_set_read.set_policy(READ_RIGHTS);
            avc_for_set_read.reset();

            tx_set_read.send(true).expect("send set-read signal")
        });

        let policy_server_for_set_write = policy_server.clone();
        let avc_for_set_write = avc;
        let set_write_thread = spawn(|| async move {
            rx_set_read.await.expect("receive set-read signal");
            std::thread::sleep(std::time::Duration::from_micros(1));

            policy_server_for_set_write.set_policy(WRITE_RIGHTS);
            avc_for_set_write.reset();

            tx_last_policy_change_1.send(true).expect("send last-policy-change signal (1)");
            tx_last_policy_change_2.send(true).expect("send last-policy-change signal (2)");
        });

        set_read_thread.join().expect("join set-policy-to-read");
        let _ = set_write_thread.join().expect("join set-policy-to-write").await;
        let _ = query_thread_1.join().expect("join query").await;
        let _ = query_thread_2.join().expect("join query").await;

        let trace_1 = rx1.await.expect("receive trace 1");
        let trace_2 = rx2.await.expect("receive trace 2");
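
        // The policy only ever becomes more permissive during this test (none ->
        // read -> write), so for any given SID the observed access vectors must be
        // non-decreasing; a decrease would indicate a stale cache entry being served
        // after a reset.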
        for trace in [trace_1, trace_2] {
            let mut trace_by_sid = HashMap::<SecurityId, Vec<AccessVector>>::new();
            for (sid, access_decision) in trace {
                trace_by_sid.entry(sid).or_insert(vec![]).push(access_decision.allow);
            }
            for access_vectors in trace_by_sid.values() {
                let initial_rights = AccessVector::NONE;
                let mut prev_rights = &initial_rights;
                for rights in access_vectors.iter() {
                    assert!(rights >= prev_rights);
                    prev_rights = rights;
                }
            }
        }
    }

    struct SecurityServer {
        manager: Manager<SecurityServer>,
        policy: Arc<AtomicU32>,
    }

    impl SecurityServer {
        fn manager(&self) -> &Manager<SecurityServer> {
            &self.manager
        }
    }

    impl Query for SecurityServer {
        fn compute_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
        ) -> AccessDecision {
            let policy = self.policy.as_ref().load(Ordering::Relaxed);
            if policy == NO_RIGHTS {
                AccessDecision::default()
            } else if policy == READ_RIGHTS {
                ACCESS_VECTOR_READ
            } else if policy == WRITE_RIGHTS {
                ACCESS_VECTOR_WRITE
            } else {
                panic!("compute_access_decision found invalid policy: {}", policy);
            }
        }

        fn compute_new_fs_node_sid(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
        ) -> Result<SecurityId, anyhow::Error> {
            unreachable!()
        }

        fn compute_new_fs_node_sid_with_name(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _fs_node_class: FsNodeClass,
            _fs_node_name: NullessByteStr<'_>,
        ) -> Option<SecurityId> {
            unreachable!()
        }

        fn compute_ioctl_access_decision(
            &self,
            _source_sid: SecurityId,
            _target_sid: SecurityId,
            _target_class: ObjectClass,
            _ioctl_prefix: u8,
        ) -> IoctlAccessDecision {
            todo!()
        }
    }

    impl Reset for SecurityServer {
        fn reset(&self) -> bool {
            true
        }
    }

    #[fuchsia::test]
    async fn manager_cache_coherence() {
        for _ in 0..10 {
            test_manager_cache_coherence().await
        }
    }

    async fn test_manager_cache_coherence() {
        let (active_policy, security_server) = {
            let manager = Manager::new();

            let active_policy: Arc<AtomicU32> = Arc::new(Default::default());
            let security_server =
                Arc::new(SecurityServer { manager, policy: active_policy.clone() });

            security_server
                .as_ref()
                .manager()
                .set_security_server(Arc::downgrade(&security_server));

            (active_policy, security_server)
        };
        let sids = unique_sids(30);

        fn set_policy(owner: &Arc<AtomicU32>, policy: u32) {
            if policy > 2 {
                panic!("attempt to set policy to invalid value: {}", policy);
            }
            owner.as_ref().store(policy, Ordering::Relaxed);
        }

        set_policy(&active_policy, NO_RIGHTS);

        let (tx_last_policy_change_1, rx_last_policy_change_1) =
            futures::channel::oneshot::channel();
        let (tx_last_policy_change_2, rx_last_policy_change_2) =
            futures::channel::oneshot::channel();

        let (tx1, rx1) = futures::channel::oneshot::channel();
        let mut avc_for_query_1 = security_server.manager().new_thread_local_cache();
        let sids_for_query_1 = sids.clone();

        let query_thread_1 = spawn(|| async move {
            let sids = sids_for_query_1;
            let mut trace = vec![];

            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(2000) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_1.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            rx_last_policy_change_1.await.expect("receive last-policy-change signal (1)");

            for i in thread_rng().sample_iter(&Uniform::new(0, 20)).take(10) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_1.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            tx1.send(trace).expect("send trace 1");

            // After the final policy change, every entry still cached thread-locally
            // must reflect the write policy.
            for (_, result) in avc_for_query_1.delegate.access_cache.iter() {
                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
            }
        });

        let (tx2, rx2) = futures::channel::oneshot::channel();
        let mut avc_for_query_2 = security_server.manager().new_thread_local_cache();
        let sids_for_query_2 = sids.clone();

        let query_thread_2 = spawn(|| async move {
            let sids = sids_for_query_2;
            let mut trace = vec![];

            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(2000) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_2.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            rx_last_policy_change_2.await.expect("receive last-policy-change signal (2)");

            for i in thread_rng().sample_iter(&Uniform::new(10, 30)).take(10) {
                trace.push((
                    sids[i].clone(),
                    avc_for_query_2.compute_access_decision(
                        sids[i].clone(),
                        A_TEST_SID.clone(),
                        KernelClass::Process.into(),
                    ),
                ))
            }

            tx2.send(trace).expect("send trace 2");

            for (_, result) in avc_for_query_2.delegate.access_cache.iter() {
                assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
            }
        });

        let active_policy_for_set_read = active_policy.clone();
        let security_server_for_set_read = security_server.clone();
        let (tx_set_read, rx_set_read) = futures::channel::oneshot::channel();
        let set_read_thread = spawn(move || {
            std::thread::sleep(std::time::Duration::from_micros(1));

            set_policy(&active_policy_for_set_read, READ_RIGHTS);
            security_server_for_set_read.manager().reset();

            tx_set_read.send(true).expect("send set-read signal")
        });
        let active_policy_for_set_write = active_policy.clone();
        let security_server_for_set_write = security_server.clone();
        let set_write_thread = spawn(|| async move {
            rx_set_read.await.expect("receive set-read signal");
            std::thread::sleep(std::time::Duration::from_micros(1));

            set_policy(&active_policy_for_set_write, WRITE_RIGHTS);
            security_server_for_set_write.manager().reset();

            tx_last_policy_change_1.send(true).expect("send last-policy-change signal (1)");
            tx_last_policy_change_2.send(true).expect("send last-policy-change signal (2)");
        });

        set_read_thread.join().expect("join set-policy-to-read");
        let _ = set_write_thread.join().expect("join set-policy-to-write").await;
        let _ = query_thread_1.join().expect("join query").await;
        let _ = query_thread_2.join().expect("join query").await;

        let trace_1 = rx1.await.expect("receive trace 1");
        let trace_2 = rx2.await.expect("receive trace 2");
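
        // As in the `Locked` test above, rights may only ratchet upward for any
        // given SID across the none -> read -> write policy transitions.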
        for trace in [trace_1, trace_2] {
            let mut trace_by_sid = HashMap::<SecurityId, Vec<AccessVector>>::new();
            for (sid, access_decision) in trace {
                trace_by_sid.entry(sid).or_insert(vec![]).push(access_decision.allow);
            }
            for access_vectors in trace_by_sid.values() {
                let initial_rights = AccessVector::NONE;
                let mut prev_rights = &initial_rights;
                for rights in access_vectors.iter() {
                    assert!(rights >= prev_rights);
                    prev_rights = rights;
                }
            }
        }

        // The shared cache must likewise contain only write-policy decisions after
        // the final reset.
        let shared_cache = security_server.manager().shared_cache.delegate.lock();
        for (_, result) in shared_cache.access_cache.iter() {
            assert_eq!(ACCESS_VECTOR_WRITE, result.access_decision);
        }
    }
}