1use crate::task_metrics::constants::{COMPONENT_CPU_MAX_SAMPLES, CPU_SAMPLE_PERIOD};
6use crate::task_metrics::measurement::{Measurement, MeasurementsQueue};
7use crate::task_metrics::runtime_stats_source::RuntimeStatsSource;
8use fuchsia_async as fasync;
9use fuchsia_inspect::{self as inspect, HistogramProperty, UintLinearHistogramProperty};
10use futures::future::BoxFuture;
11use futures::lock::Mutex;
12use futures::FutureExt;
13use injectable_time::TimeSource;
14use log::debug;
15use moniker::ExtendedMoniker;
16use std::fmt::Debug;
17use std::sync::{Arc, Weak};
18use zx::sys::{self as zx_sys, zx_system_get_num_cpus};
19
20pub(crate) fn create_cpu_histogram(
21 node: &inspect::Node,
22 moniker: &ExtendedMoniker,
23) -> inspect::UintLinearHistogramProperty {
24 node.create_uint_linear_histogram(
25 moniker.to_string(),
26 inspect::LinearHistogramParams { floor: 1, step_size: 1, buckets: 99 },
27 )
28}
29
/// Returns the number of CPU cores on the system, used to scale the
/// available core time when computing utilization percentages.
fn num_cpus() -> i64 {
    // SAFETY: zx_system_get_num_cpus takes no arguments, touches no caller
    // memory, and has no preconditions; it simply returns a count.
    (unsafe { zx_system_get_num_cpus() }) as i64
}
35
/// Lifecycle of a tracked task. Advanced by the TASK_TERMINATED signal
/// handler (Alive -> Terminated) and by `measure_subtree`
/// (Terminated -> TerminatedAndMeasured after the final sample is taken).
#[derive(Debug)]
pub(crate) enum TaskState<T: RuntimeStatsSource + Debug> {
    /// The task has terminated and its final CPU usage was already sampled.
    TerminatedAndMeasured,
    /// The task has terminated but its final usage hasn't been sampled yet.
    Terminated(T),
    /// The task is still running.
    Alive(T),
}
42
43impl<T> From<T> for TaskState<T>
44where
45 T: RuntimeStatsSource + Debug,
46{
47 fn from(task: T) -> TaskState<T> {
48 TaskState::Alive(task)
49 }
50}
51
/// Tracks one kernel task for a component: its CPU measurements, an optional
/// per-sample utilization histogram, and child tasks whose usage is
/// subtracted so this task reports exclusive usage.
#[derive(Debug)]
pub struct TaskInfo<T: RuntimeStatsSource + Debug> {
    /// Kernel object id of the tracked task; also used as the inspect node name.
    koid: zx_sys::zx_koid_t,
    /// Current lifecycle state, shared with the TASK_TERMINATED signal handler.
    pub(crate) task: Arc<Mutex<TaskState<T>>>,
    /// Clock used to stamp measurements (injectable for tests).
    pub(crate) time_source: Arc<dyn TimeSource + Sync + Send>,
    /// When true, a parent task's measurement already covers this task, so
    /// `measure_if_no_parent` is a no-op.
    pub(crate) has_parent_task: bool,
    /// Rolling queue of recent measurements (bounded by COMPONENT_CPU_MAX_SAMPLES).
    pub(crate) measurements: MeasurementsQueue,
    /// Final measurement captured when the task terminated, if taken yet.
    exited_cpu: Option<Measurement>,
    /// Optional utilization histogram fed on each recorded measurement.
    histogram: Option<UintLinearHistogramProperty>,
    /// Cumulative CPU time at the previous measurement, for delta computation.
    previous_cpu: zx::MonotonicDuration,
    /// Timestamp of the previous histogram update, for elapsed-time computation.
    previous_histogram_timestamp: i64,
    /// Number of CPU cores; scales available core time per interval.
    cpu_cores: i64,
    /// Expected interval between samples; shorter intervals are discarded
    /// from the histogram as noise.
    sample_period: std::time::Duration,
    /// Weak refs to child tasks whose usage is subtracted from this task's.
    children: Vec<Weak<Mutex<TaskInfo<T>>>>,
    /// Background future waiting on TASK_TERMINATED; kept alive by ownership.
    _terminated_task: fasync::Task<()>,
    /// Nanosecond timestamp of the most recent measurement or termination.
    pub(crate) most_recent_measurement_nanos: Arc<Mutex<Option<i64>>>,
}
69
impl<T: 'static + RuntimeStatsSource + Debug + Send + Sync> TaskInfo<T> {
    /// Creates a `TaskInfo` tracking `task`, using the system core count and
    /// the default `CPU_SAMPLE_PERIOD`.
    ///
    /// Returns `Err` if the task's koid cannot be queried.
    pub fn try_from(
        task: T,
        histogram: Option<UintLinearHistogramProperty>,
        time_source: Arc<dyn TimeSource + Sync + Send>,
    ) -> Result<Self, zx::Status> {
        Self::try_from_internal(task, histogram, time_source, CPU_SAMPLE_PERIOD, num_cpus())
    }
}
82
83impl<T: 'static + RuntimeStatsSource + Debug + Send + Sync> TaskInfo<T> {
84 fn try_from_internal(
86 task: T,
87 histogram: Option<UintLinearHistogramProperty>,
88 time_source: Arc<dyn TimeSource + Sync + Send>,
89 sample_period: std::time::Duration,
90 cpu_cores: i64,
91 ) -> Result<Self, zx::Status> {
92 let koid = task.koid()?;
93 let maybe_handle = task.handle_ref().duplicate(zx::Rights::SAME_RIGHTS).ok();
94 let task_state = Arc::new(Mutex::new(TaskState::from(task)));
95 let weak_task_state = Arc::downgrade(&task_state);
96 let most_recent_measurement_nanos = Arc::new(Mutex::new(None));
97 let movable_most_recent_measurement_nanos = most_recent_measurement_nanos.clone();
98 let movable_time_source = time_source.clone();
99 let _terminated_task = fasync::Task::spawn(async move {
100 if let Some(handle) = maybe_handle {
101 fasync::OnSignals::new(&handle, zx::Signals::TASK_TERMINATED)
102 .await
103 .map(|_: fidl::Signals| ()) .unwrap_or_else(|error| debug!(error:%; "error creating signal handler"));
105 }
106
107 if let Some(task_state) = weak_task_state.upgrade() {
110 let mut terminated_at_nanos_guard =
111 movable_most_recent_measurement_nanos.lock().await;
112 *terminated_at_nanos_guard = Some(movable_time_source.now());
113 let mut state = task_state.lock().await;
114 *state = match std::mem::replace(&mut *state, TaskState::TerminatedAndMeasured) {
115 s @ TaskState::TerminatedAndMeasured => s,
116 TaskState::Alive(t) => TaskState::Terminated(t),
117 s @ TaskState::Terminated(_) => s,
118 };
119 }
120 });
121 Ok(Self {
122 koid,
123 task: task_state,
124 has_parent_task: false,
125 measurements: MeasurementsQueue::new(COMPONENT_CPU_MAX_SAMPLES, time_source.clone()),
126 children: vec![],
127 cpu_cores,
128 sample_period,
129 histogram,
130 previous_cpu: zx::MonotonicDuration::from_nanos(0),
131 previous_histogram_timestamp: time_source.now(),
132 time_source,
133 _terminated_task,
134 most_recent_measurement_nanos,
135 exited_cpu: None,
136 })
137 }
138
139 pub async fn measure_if_no_parent(&mut self) -> Option<&Measurement> {
143 if self.has_parent_task {
146 return None;
147 }
148
149 self.measure_subtree().await
150 }
151
152 pub fn add_child(&mut self, task: Weak<Mutex<TaskInfo<T>>>) {
154 self.children.push(task);
155 }
156
157 pub async fn most_recent_measurement(&self) -> Option<zx::BootInstant> {
158 self.most_recent_measurement_nanos.lock().await.map(|t| zx::BootInstant::from_nanos(t))
159 }
160
161 pub async fn take_measurements_queue(&mut self) -> Result<MeasurementsQueue, ()> {
165 match &*self.task.lock().await {
166 TaskState::TerminatedAndMeasured | TaskState::Terminated(_) => Ok(std::mem::replace(
167 &mut self.measurements,
168 MeasurementsQueue::new(COMPONENT_CPU_MAX_SAMPLES, self.time_source.clone()),
169 )),
170 _ => Err(()),
171 }
172 }
173
174 pub fn record_measurement_with_start_time(&mut self, t: zx::BootInstant) {
178 self.record_measurement(Measurement::empty(t));
179 }
180
181 fn record_measurement(&mut self, m: Measurement) {
182 let current_cpu = *m.cpu_time();
183 self.add_to_histogram(current_cpu - self.previous_cpu, *m.timestamp());
184 self.previous_cpu = current_cpu;
185 self.measurements.insert(m);
186 }
187
188 fn measure_subtree<'a>(&'a mut self) -> BoxFuture<'a, Option<&'a Measurement>> {
189 async move {
190 let (task_terminated_can_measure, runtime_info_res) = {
191 let mut guard = self.task.lock().await;
192 match &*guard {
193 TaskState::TerminatedAndMeasured => {
194 self.measurements.insert_post_invalidation();
195 return None;
196 }
197 TaskState::Terminated(task) => {
198 let result = task.get_runtime_info().await;
199 *guard = TaskState::TerminatedAndMeasured;
200 let mut terminated_at_nanos_guard =
201 self.most_recent_measurement_nanos.lock().await;
202 *terminated_at_nanos_guard = Some(self.time_source.now());
203 (true, result)
204 }
205 TaskState::Alive(task) => (false, task.get_runtime_info().await),
206 }
207 };
208 if let Ok(runtime_info) = runtime_info_res {
209 let mut measurement = Measurement::from_runtime_info(
210 runtime_info,
211 zx::BootInstant::from_nanos(self.time_source.now()),
212 );
213 let mut alive_children = vec![];
215 while let Some(weak_child) = self.children.pop() {
216 if let Some(child) = weak_child.upgrade() {
217 let mut child_guard = child.lock().await;
218 if let Some(child_measurement) = child_guard.measure_subtree().await {
219 measurement -= child_measurement;
220 }
221 if child_guard.is_alive().await {
222 alive_children.push(weak_child);
223 }
224 }
225 }
226 self.children = alive_children;
227 self.record_measurement(measurement);
228
229 if task_terminated_can_measure {
230 self.exited_cpu = self.measurements.most_recent_measurement().cloned();
231 return None;
232 }
233 return self.measurements.most_recent_measurement();
234 }
235 None
236 }
237 .boxed()
238 }
239
240 fn add_to_histogram(
242 &mut self,
243 cpu_time_delta: zx::MonotonicDuration,
244 timestamp: zx::BootInstant,
245 ) {
246 if let Some(histogram) = &self.histogram {
247 let time_value: i64 = timestamp.into_nanos();
248 let elapsed_time = time_value - self.previous_histogram_timestamp;
249 self.previous_histogram_timestamp = time_value;
250 if elapsed_time < ((self.sample_period.as_nanos() as i64) * 9 / 10) {
251 return;
252 }
253 let available_core_time = elapsed_time * self.cpu_cores;
254 if available_core_time != 0 {
255 let cpu_numerator =
257 (cpu_time_delta.into_nanos() as i64) * 100 + available_core_time - 1;
258 histogram.insert((cpu_numerator / available_core_time) as u64);
259 }
260 }
261 }
262
263 pub async fn is_alive(&self) -> bool {
267 let task_state_terminated_and_measured =
268 matches!(*self.task.lock().await, TaskState::TerminatedAndMeasured);
269 let task_has_real_measurements = !self.measurements.no_true_measurements();
270
271 !task_state_terminated_and_measured || task_has_real_measurements
272 }
273
274 pub async fn exited_cpu(&self) -> Option<&Measurement> {
275 self.exited_cpu.as_ref()
276 }
277
278 pub fn record_to_node(&self, parent: &inspect::Node) {
280 let node = parent.create_child(self.koid.to_string());
281 self.measurements.record_to_node(&node);
282 parent.record(node);
283 }
284
285 pub fn koid(&self) -> zx_sys::zx_koid_t {
286 self.koid
287 }
288
289 #[cfg(test)]
290 pub fn total_measurements(&self) -> usize {
291 self.measurements.true_measurement_count()
292 }
293}
294
#[cfg(test)]
mod tests {
    use super::*;
    use crate::task_metrics::testing::FakeTask;
    use assert_matches::assert_matches;
    use diagnostics_assertions::assert_data_tree;
    use diagnostics_hierarchy::{ArrayContent, DiagnosticsHierarchyGetter, LinearHistogram};
    use injectable_time::FakeTime;

    // Takes one measurement from `ti`, then advances the fake clock by one
    // full sample period so the next measurement's interval is accepted by
    // the histogram logic.
    async fn take_measurement_then_tick_clock<
        'a,
        T: 'static + RuntimeStatsSource + Debug + Send + Sync,
    >(
        ti: &'a mut TaskInfo<T>,
        clock: &Arc<FakeTime>,
    ) -> Option<&'a Measurement> {
        let m = ti.measure_if_no_parent().await;
        clock.add_ticks(CPU_SAMPLE_PERIOD.as_nanos() as i64);
        m
    }

    // Verifies the measurement queue grows while alive, keeps the count
    // frozen after termination (post-invalidation inserts), then drains
    // until the task stops reporting as alive.
    #[fuchsia::test]
    async fn rotates_measurements_per_task() {
        let clock = Arc::new(FakeTime::new());
        let mut task: TaskInfo<FakeTask> =
            TaskInfo::try_from(FakeTask::default(), None , clock.clone()).unwrap();
        assert!(task.is_alive().await);

        take_measurement_then_tick_clock(&mut task, &clock).await;
        assert_eq!(task.measurements.true_measurement_count(), 1);
        take_measurement_then_tick_clock(&mut task, &clock).await;
        assert_eq!(task.measurements.true_measurement_count(), 2);
        take_measurement_then_tick_clock(&mut task, &clock).await;
        assert!(task.is_alive().await);
        assert_eq!(task.measurements.true_measurement_count(), 3);

        task.force_terminate().await;

        // One final real measurement is taken in the Terminated state.
        take_measurement_then_tick_clock(&mut task, &clock).await;
        assert_eq!(task.measurements.true_measurement_count(), 4);
        assert_matches!(*task.task.lock().await, TaskState::TerminatedAndMeasured);

        // Further measuring doesn't add real measurements.
        for _ in 4..COMPONENT_CPU_MAX_SAMPLES {
            take_measurement_then_tick_clock(&mut task, &clock).await;
            assert_eq!(task.measurements.true_measurement_count(), 4);
        }

        // Once the queue is full, old samples start rotating out; the task
        // stays "alive" until the last real measurement is gone.
        take_measurement_then_tick_clock(&mut task, &clock).await; assert!(task.is_alive().await);
        assert_eq!(task.measurements.true_measurement_count(), 3);
        take_measurement_then_tick_clock(&mut task, &clock).await; assert!(task.is_alive().await);
        assert_eq!(task.measurements.true_measurement_count(), 2);
        take_measurement_then_tick_clock(&mut task, &clock).await; assert!(task.is_alive().await);
        assert_eq!(task.measurements.true_measurement_count(), 1);

        take_measurement_then_tick_clock(&mut task, &clock).await; assert!(!task.is_alive().await);
        assert_eq!(task.measurements.true_measurement_count(), 0);
    }

    // Verifies measurements are written to inspect under the task's koid.
    #[fuchsia::test]
    async fn write_inspect() {
        let time = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 2,
                        queue_time: 4,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 6,
                        queue_time: 8,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, time.clone(),
        )
        .unwrap();

        time.set_ticks(1);
        task.measure_if_no_parent().await;
        time.set_ticks(2);
        task.measure_if_no_parent().await;

        let inspector = inspect::Inspector::default();
        task.record_to_node(inspector.root());
        assert_data_tree!(inspector, root: {
            "1": {
                timestamps: vec![1i64, 2],
                cpu_times: vec![2i64, 6],
                queue_times: vec![4i64, 8],
            }
        });
    }

    // Verifies the queue caps at COMPONENT_CPU_MAX_SAMPLES even after
    // writing more samples than fit.
    #[fuchsia::test]
    async fn write_more_than_max_samples() {
        let inspector = inspect::Inspector::default();
        let clock = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 2,
                        queue_time: 4,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 6,
                        queue_time: 8,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, clock.clone(),
        )
        .unwrap();

        for _ in 0..(COMPONENT_CPU_MAX_SAMPLES + 10) {
            assert!(take_measurement_then_tick_clock(&mut task, &clock).await.is_some());
        }

        assert_eq!(task.measurements.true_measurement_count(), COMPONENT_CPU_MAX_SAMPLES);
        task.record_to_node(inspector.root());
        assert_eq!(60, COMPONENT_CPU_MAX_SAMPLES);
        assert_eq!(task.measurements.true_measurement_count(), 60);

        let hierarchy = inspector.get_diagnostics_hierarchy();
        for top_level in &hierarchy.children {
            let child = hierarchy.get_child(&top_level.name).unwrap();
            let timestamps = child.get_property("timestamps").unwrap().int_array().unwrap();
            assert_eq!(timestamps.len(), COMPONENT_CPU_MAX_SAMPLES);
            let cpu_times = child.get_property("cpu_times").unwrap().int_array().unwrap();
            assert_eq!(cpu_times.len(), COMPONENT_CPU_MAX_SAMPLES);
            let queue_times = child.get_property("queue_times").unwrap().int_array().unwrap();
            assert_eq!(queue_times.len(), COMPONENT_CPU_MAX_SAMPLES);
        }
    }

    // Verifies the cap still holds when extra samples arrive at uneven
    // (shorter-than-period) intervals.
    #[fuchsia::test]
    async fn more_than_max_samples_offset_time() {
        let inspector = inspect::Inspector::default();
        let clock = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 2,
                        queue_time: 4,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 6,
                        queue_time: 8,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, clock.clone(),
        )
        .unwrap();

        for _ in 0..COMPONENT_CPU_MAX_SAMPLES {
            assert!(take_measurement_then_tick_clock(&mut task, &clock).await.is_some());
        }

        task.measure_if_no_parent().await;

        clock.add_ticks((CPU_SAMPLE_PERIOD - std::time::Duration::from_secs(1)).as_nanos() as i64);
        task.measure_if_no_parent().await;

        assert_eq!(task.measurements.true_measurement_count(), COMPONENT_CPU_MAX_SAMPLES);
        task.record_to_node(inspector.root());
    }

    // Verifies child task usage is subtracted from the parent's measurement
    // and that dead children are pruned from the child list.
    #[fuchsia::test]
    async fn measure_with_children() {
        let clock = Arc::new(FakeTime::new());
        let mut task = TaskInfo::try_from(
            FakeTask::new(
                1,
                vec![
                    zx::TaskRuntimeInfo {
                        cpu_time: 100,
                        queue_time: 200,
                        ..zx::TaskRuntimeInfo::default()
                    },
                    zx::TaskRuntimeInfo {
                        cpu_time: 300,
                        queue_time: 400,
                        ..zx::TaskRuntimeInfo::default()
                    },
                ],
            ),
            None, clock.clone(),
        )
        .unwrap();

        let child_1 = Arc::new(Mutex::new(
            TaskInfo::try_from(
                FakeTask::new(
                    2,
                    vec![
                        zx::TaskRuntimeInfo {
                            cpu_time: 10,
                            queue_time: 20,
                            ..zx::TaskRuntimeInfo::default()
                        },
                        zx::TaskRuntimeInfo {
                            cpu_time: 30,
                            queue_time: 40,
                            ..zx::TaskRuntimeInfo::default()
                        },
                    ],
                ),
                None, clock.clone(),
            )
            .unwrap(),
        ));

        let child_2 = Arc::new(Mutex::new(
            TaskInfo::try_from(
                FakeTask::new(
                    3,
                    vec![
                        zx::TaskRuntimeInfo {
                            cpu_time: 5,
                            queue_time: 2,
                            ..zx::TaskRuntimeInfo::default()
                        },
                        zx::TaskRuntimeInfo {
                            cpu_time: 15,
                            queue_time: 4,
                            ..zx::TaskRuntimeInfo::default()
                        },
                    ],
                ),
                None, clock.clone(),
            )
            .unwrap(),
        ));

        task.add_child(Arc::downgrade(&child_1));
        task.add_child(Arc::downgrade(&child_2));

        // Parent's measurement excludes both children's usage.
        {
            let measurement = take_measurement_then_tick_clock(&mut task, &clock).await.unwrap();
            assert_eq!(measurement.cpu_time().into_nanos(), 100 - 10 - 5);
            assert_eq!(measurement.queue_time().into_nanos(), 200 - 20 - 2);
        }
        assert_eq!(child_1.lock().await.total_measurements(), 1);
        assert_eq!(child_2.lock().await.total_measurements(), 1);

        // Make child_2 fully measured with no real samples -> not alive.
        {
            let mut child_2_guard = child_2.lock().await;
            child_2_guard.task = Arc::new(Mutex::new(TaskState::TerminatedAndMeasured));
            child_2_guard.measurements =
                MeasurementsQueue::new(COMPONENT_CPU_MAX_SAMPLES, clock.clone());
        }

        assert_eq!(task.children.len(), 2);
        {
            let measurement = take_measurement_then_tick_clock(&mut task, &clock).await.unwrap();
            assert_eq!(measurement.cpu_time().into_nanos(), 300 - 30);
            assert_eq!(measurement.queue_time().into_nanos(), 400 - 40);
        }

        // Dead child_2 was pruned; live child_1 remains and was measured.
        assert_eq!(task.children.len(), 1); assert_eq!(child_1.lock().await.total_measurements(), 2);
    }

    // (bucket index, count) pairs for non-empty histogram buckets.
    type BucketPairs = Vec<(i64, i64)>;

    use diagnostics_hierarchy::Property;

    // Reads the "foo" histogram from the inspector and returns its
    // non-zero buckets as (index, count) pairs, handling both dense and
    // sparse (condensed) histogram encodings.
    fn linear_histogram_non_zero_values(inspector: &inspect::Inspector) -> BucketPairs {
        let mut output = vec![];
        let hierarchy = inspector.get_diagnostics_hierarchy();
        let histogram = hierarchy.get_property_by_path(&["foo"]).unwrap();
        if let Property::UintArray(_, data) = histogram {
            if let ArrayContent::LinearHistogram(LinearHistogram { counts, indexes, .. }) = data {
                match indexes {
                    None => {
                        for (index, count) in counts.iter().enumerate() {
                            if *count > 0 && *count <= i64::MAX as u64 {
                                output.push((index as i64, *count as i64));
                            }
                        }
                    }
                    Some(indexes) => {
                        for (index, count) in indexes.iter().zip(counts.iter()) {
                            if *count > 0
                                && *count <= i64::MAX as u64
                                && *index <= i64::MAX as usize
                            {
                                output.push((*index as i64, *count as i64));
                            }
                        }
                    }
                }
            }
        }
        output
    }

    // Builds a FakeTask whose cumulative cpu_time readings follow the given
    // per-sample deltas.
    fn fake_readings(id: u64, cpu_deltas: Vec<u64>) -> FakeTask {
        let mut cpu_time = 0i64;
        let mut readings = vec![];
        for delta in cpu_deltas.iter() {
            cpu_time += *delta as i64;
            readings.push(zx::TaskRuntimeInfo { cpu_time, ..zx::TaskRuntimeInfo::default() })
        }
        FakeTask::new(id, readings)
    }

    // Verifies percentage rounding at bucket boundaries (ceiling division:
    // e.g. 989/1000 -> 99%, 991/1000 -> 100%).
    #[fuchsia::test]
    async fn bucket_cutoffs() {
        let readings = fake_readings(1, vec![1, 0, 500, 989, 990, 991, 999, 0]);
        let inspector = inspect::Inspector::default();
        let clock = FakeTime::new();
        let histogram =
            create_cpu_histogram(&inspector.root(), &ExtendedMoniker::parse_str("foo").unwrap());
        let mut task = TaskInfo::try_from_internal(
            readings,
            Some(histogram),
            Arc::new(clock.clone()),
            std::time::Duration::from_nanos(1000),
            1, )
        .unwrap();

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(1, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(0, 1), (1, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(0, 1), (1, 1), (50, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(0, 1), (1, 1), (50, 1), (99, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(0, 1), (1, 1), (50, 1), (99, 2)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(0, 1), (1, 1), (50, 1), (99, 2), (100, 1)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(0, 1), (1, 1), (50, 1), (99, 2), (100, 2)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await; let answer = vec![(0, 2), (1, 1), (50, 1), (99, 2), (100, 2)];
        assert_eq!(linear_histogram_non_zero_values(&inspector), answer);
    }

    // Verifies intervals shorter than 90% of the sample period don't add
    // histogram entries, and the skipped CPU time folds into the next
    // accepted interval.
    #[fuchsia::test]
    async fn discard_short_intervals() {
        let readings = fake_readings(1, vec![100, 100, 100, 100]);
        let inspector = inspect::Inspector::default();
        let clock = FakeTime::new();
        let histogram =
            create_cpu_histogram(&inspector.root(), &ExtendedMoniker::parse_str("foo").unwrap());
        let mut task = TaskInfo::try_from_internal(
            readings,
            Some(histogram),
            Arc::new(clock.clone()),
            std::time::Duration::from_nanos(1000),
            1, )
        .unwrap();

        assert_eq!(linear_histogram_non_zero_values(&inspector), vec![]);

        clock.add_ticks(900);
        task.measure_if_no_parent().await;
        assert_eq!(linear_histogram_non_zero_values(&inspector), vec![(12, 1)]);

        // 899 < 90% of the period -> no new entry until a long interval.
        clock.add_ticks(899);
        task.measure_if_no_parent().await;
        assert_eq!(linear_histogram_non_zero_values(&inspector), vec![(12, 1)]); clock.add_ticks(2000);
        task.measure_if_no_parent().await;
        assert_eq!(linear_histogram_non_zero_values(&inspector), (vec![(5, 1), (12, 1)]));

        clock.add_ticks(1000);
        task.measure_if_no_parent().await;
        assert_eq!(linear_histogram_non_zero_values(&inspector), (vec![(5, 1), (10, 1), (12, 1)]));
    }

    // Verifies utilization is divided over all cores: 400ns of CPU over a
    // 1000ns interval on 4 cores is 10%.
    #[fuchsia::test]
    async fn divide_by_cores() {
        let readings = fake_readings(1, vec![400]);
        let inspector = inspect::Inspector::default();
        let clock = FakeTime::new();
        let histogram =
            create_cpu_histogram(&inspector.root(), &ExtendedMoniker::parse_str("foo").unwrap());
        let mut task = TaskInfo::try_from_internal(
            readings,
            Some(histogram),
            Arc::new(clock.clone()),
            std::time::Duration::from_nanos(1000),
            4, )
        .unwrap();

        assert_eq!(linear_histogram_non_zero_values(&inspector), vec![]);

        clock.add_ticks(1000);
        task.measure_if_no_parent().await;
        assert_eq!(linear_histogram_non_zero_values(&inspector), vec![(10, 1)]);
    }
}