1mod emu;
29mod timers;
30
31use crate::emu::EmulationTimerOps;
32use anyhow::{Context, Result, anyhow};
33use async_trait::async_trait;
34use fidl::HandleBased;
35use fidl::encoding::ProxyChannelBox;
36use fidl::endpoints::RequestStream;
37use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
38use fuchsia_inspect::{IntProperty, NumericProperty, Property};
39use futures::channel::mpsc;
40use futures::sink::SinkExt;
41use futures::{StreamExt, TryStreamExt};
42use log::{debug, error, warn};
43use scopeguard::defer;
44use std::cell::RefCell;
45use std::rc::Rc;
46use std::sync::LazyLock;
47use time_pretty::{MSEC_IN_NANOS, format_duration, format_timer};
48use zx::AsHandleRef;
49use {
50 fidl_fuchsia_hardware_hrtimer as ffhh, fidl_fuchsia_time_alarms as fta,
51 fuchsia_async as fasync, fuchsia_inspect as finspect, fuchsia_runtime as fxr,
52 fuchsia_trace as trace,
53};
54
55static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
56static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));
57
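/// Upper bound applied to the `max_ticks` value reported by the timer driver.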
58static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);
60
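/// Lower bound on the tick count programmed into the hardware timer.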
61static MIN_USEFUL_TICKS: u64 = 1;
65
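/// Index of the preferred timer in the driver-reported timer list; index 0 is used as a
/// fallback when the driver exposes fewer timers.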
66const MAIN_TIMER_ID: usize = 6;
69
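/// Scheduling delays and expiry slack larger than this are logged as unusual.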
70const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;
72
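/// Awaits `$fut`, logging a warning (and, on Fuchsia targets, requesting backtraces from
/// all threads) every 30 seconds for as long as the future remains pending.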
73macro_rules! log_long_op {
76 ($fut:expr) => {{
77 use futures::FutureExt;
78 let fut = $fut;
79 futures::pin_mut!(fut);
80 loop {
81 let timeout = fasync::Timer::new(std::time::Duration::from_secs(30));
82 futures::select! {
83 res = fut.as_mut().fuse() => {
84 break res;
85 }
86 _ = timeout.fuse() => {
87 warn!("unexpected blocking: long-running async operation at {}:{}", file!(), line!());
88 #[cfg(all(target_os = "fuchsia", not(doc)))]
89 ::debug::backtrace_request_all_threads();
90 }
91 }
92 }
93 }};
94}
95
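/// RAII guard that increments an Inspect [IntProperty] on creation and decrements it on
/// drop.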
96struct ScopedInc<'a> {
98 property: &'a IntProperty,
99}
100
101impl<'a> ScopedInc<'a> {
102 fn new(property: &'a IntProperty) -> Self {
103 property.add(1);
104 Self { property }
105 }
106}
107
108impl<'a> Drop for ScopedInc<'a> {
109 fn drop(&mut self) {
110 self.property.add(-1);
111 }
112}
113
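/// Returns true if the earliest deadline changed between `before` and `after`.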
114fn is_deadline_changed(
117 before: Option<fasync::BootInstant>,
118 after: Option<fasync::BootInstant>,
119) -> bool {
120 match (before, after) {
121 (None, None) => false,
122 (None, Some(_)) | (Some(_), None) => true,
123 (Some(before), Some(after)) => before != after,
124 }
125}
126
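/// Errors surfaced by [TimerOps] operations: either a driver-reported error or a FIDL
/// transport error.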
127#[derive(Debug)]
129pub(crate) enum TimerOpsError {
130 Driver(ffhh::DriverError),
132 Fidl(fidl::Error),
134}
135
trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {}
139
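/// Abstraction over the hardware timer, implemented by [HardwareTimerOps] for real
/// devices and by [EmulationTimerOps] for emulated operation.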
140#[async_trait(?Send)]
142pub(crate) trait TimerOps {
143 async fn stop(&self, id: u64);
145
146 async fn get_timer_properties(&self) -> TimerConfig;
149
150 fn start_and_wait(
155 &self,
156 id: u64,
157 resolution: &ffhh::Resolution,
158 ticks: u64,
159 setup_event: zx::Event,
160 ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
161}
162
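/// [TimerOps] implementation backed by a connection to the hrtimer device driver.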
163struct HardwareTimerOps {
165 proxy: ffhh::DeviceProxy,
166}
167
168impl HardwareTimerOps {
169 fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
170 Box::new(Self { proxy })
171 }
172}
173
174#[async_trait(?Send)]
175impl TimerOps for HardwareTimerOps {
176 async fn stop(&self, id: u64) {
177 let _ = self
178 .proxy
179 .stop(id)
180 .await
181 .map(|result| {
182 let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
183 })
184 .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
185 }
186
187 async fn get_timer_properties(&self) -> TimerConfig {
188 match log_long_op!(self.proxy.get_properties()) {
189 Ok(p) => {
190 let timers_properties = &p.timers_properties.expect("timers_properties must exist");
191 debug!("get_timer_properties: got: {:?}", timers_properties);
192
193 let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
195 MAIN_TIMER_ID
198 } else if timers_properties.len() > 0 {
199 0
203 } else {
204 return TimerConfig::new_empty();
206 };
207 let main_timer_properties = &timers_properties[timer_index];
208 debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
209 let max_ticks: u64 = std::cmp::min(
211 main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
212 *MAX_USEFUL_TICKS,
213 );
214 let resolutions = &main_timer_properties
215 .supported_resolutions
216 .as_ref()
217 .expect("supported_resolutions is populated")
218 .iter()
                    .last()
                    .map(|r| match *r {
221 ffhh::Resolution::Duration(d) => d,
222 _ => {
223 error!(
224 "get_timer_properties: Unknown resolution type, returning millisecond."
225 );
226 MSEC_IN_NANOS
227 }
228 })
229 .map(|d| zx::BootDuration::from_nanos(d))
                    .into_iter()
                    .collect::<Vec<_>>();
232 let timer_id = main_timer_properties.id.expect("timer ID is always present");
233 TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
234 }
235 Err(e) => {
236 error!("could not get timer properties: {:?}", e);
237 TimerConfig::new_empty()
238 }
239 }
240 }
241
242 fn start_and_wait(
243 &self,
244 id: u64,
245 resolution: &ffhh::Resolution,
246 ticks: u64,
247 setup_event: zx::Event,
248 ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
249 let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
250 Box::pin(HwResponseFut { pinner: Box::pin(inner) })
251 }
252}
253
254struct HwResponseFut {
257 pinner: std::pin::Pin<
258 Box<
259 fidl::client::QueryResponseFut<
260 ffhh::DeviceStartAndWaitResult,
261 fidl::encoding::DefaultFuchsiaResourceDialect,
262 >,
263 >,
264 >,
265}
266
267use std::task::Poll;
268impl SawResponseFut for HwResponseFut {}
269impl std::future::Future for HwResponseFut {
270 type Output = Result<zx::EventPair, TimerOpsError>;
271 fn poll(
272 mut self: std::pin::Pin<&mut Self>,
273 cx: &mut std::task::Context<'_>,
274 ) -> std::task::Poll<Self::Output> {
275 let inner_poll = self.pinner.as_mut().poll(cx);
276 match inner_poll {
277 Poll::Ready(result) => Poll::Ready(match result {
278 Ok(Ok(keep_alive)) => Ok(keep_alive),
279 Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
280 Err(e) => Err(TimerOpsError::Fidl(e)),
281 }),
282 Poll::Pending => Poll::Pending,
283 }
284 }
285}
286
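/// Stops the hardware timer identified by `timer_config`.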
287async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
289 trace::duration!(c"alarms", c"hrtimer:stop", "id" => timer_config.id);
290 debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
291 hrtimer.stop(timer_config.id).await;
292 debug!("stop_hrtimer: stopped hardware timer: {}", timer_config.id);
293}
294
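/// Capacity of the mpsc channel used to send [Cmd]s into the wake timer loop.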
295const CHANNEL_SIZE: usize = 1000;
298
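/// Commands processed by [wake_timer_loop].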
299#[derive(Debug)]
301enum Cmd {
302 Start {
304 conn_id: zx::Koid,
306 deadline: timers::Deadline,
308 mode: Option<fta::SetMode>,
317 alarm_id: String,
319 responder: Rc<dyn timers::Responder>,
327 },
328 StopById {
329 done: zx::Event,
330 timer_id: timers::Id,
331 },
332 Alarm {
333 expired_deadline: fasync::BootInstant,
334 keep_alive: fidl::EventPair,
335 },
336 AlarmFidlError {
337 expired_deadline: fasync::BootInstant,
338 error: fidl::Error,
339 },
340 AlarmDriverError {
341 expired_deadline: fasync::BootInstant,
342 error: ffhh::DriverError,
343
344 timer_config_id: u64,
346 resolution_nanos: i64,
347 ticks: u64,
348 },
349 UtcUpdated {
351 transform: fxr::UtcClockTransform,
353 },
354}
355
356impl std::fmt::Display for Cmd {
357 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
358 match self {
359 Cmd::Start { conn_id, deadline, alarm_id, .. } => {
360 write!(
361 f,
362 "Start[alarm_id=\"{}\", conn_id={:?}, deadline={}]",
363 alarm_id, conn_id, deadline,
364 )
365 }
366 Cmd::Alarm { expired_deadline, .. } => {
367 write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
368 }
369 Cmd::AlarmFidlError { expired_deadline, error } => {
370 write!(
371 f,
372 "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
373 format_timer((*expired_deadline).into()),
374 error
375 )
376 }
377 Cmd::AlarmDriverError { expired_deadline, error, .. } => {
378 write!(
379 f,
380 "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
381 format_timer((*expired_deadline).into()),
382 error
383 )
384 }
385 Cmd::StopById { timer_id, done: _ } => {
386 write!(f, "StopById[timerId={}]", timer_id,)
387 }
388 Cmd::UtcUpdated { transform } => {
                write!(f, "UtcUpdated[transform={transform:?}]")
390 }
391 }
392 }
393}
394
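/// Extracts the koid of the underlying channel so each client connection can be
/// identified, then reassembles and returns the request stream.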
395pub fn get_stream_koid(
408 stream: fta::WakeAlarmsRequestStream,
409) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
410 let (inner, is_terminated) = stream.into_inner();
411 let koid = inner.channel().as_channel().get_koid().expect("infallible");
412 let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
413 (koid, stream)
414}
415
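/// Serves a single `fuchsia.time.alarms/WakeAlarms` connection, forwarding each request
/// to the wake timer loop via its command channel.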
416pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
426 let timer_loop = timer_loop.clone();
427 let timer_loop_send = || timer_loop.get_sender();
428 let (conn_id, mut requests) = get_stream_koid(requests);
429 let mut request_count = 0;
430 debug!("alarms::serve: opened connection: {:?}", conn_id);
431 while let Some(maybe_request) = requests.next().await {
432 request_count += 1;
433 debug!("alarms::serve: conn_id: {:?} incoming request: {}", conn_id, request_count);
434 match maybe_request {
435 Ok(request) => {
436 handle_request(conn_id, timer_loop_send(), request).await;
438 }
439 Err(e) => {
440 warn!("alarms::serve: error in request: {:?}", e);
441 }
442 }
443 debug!("alarms::serve: conn_id: {:?} done request: {}", conn_id, request_count);
444 }
445 warn!("alarms::serve: CLOSED CONNECTION: conn_id: {:?}", conn_id);
448}
449
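/// Asks the timer loop to stop the alarm identified by `alarm_id` and `conn_id`, and
/// waits until the loop confirms the cancellation.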
450async fn handle_cancel(alarm_id: String, conn_id: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
451 let done = zx::Event::create();
452 let timer_id = timers::Id::new(alarm_id.clone(), conn_id);
453 if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
454 warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
455 }
456 wait_signaled(&done).await;
457}
458
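/// Translates a single `WakeAlarms` FIDL request into a [Cmd] for the timer loop and
/// reports scheduling failures back to the caller.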
459async fn handle_request(
467 conn_id: zx::Koid,
468 mut cmd: mpsc::Sender<Cmd>,
469 request: fta::WakeAlarmsRequest,
470) {
471 match request {
472 fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
473 let responder = Rc::new(RefCell::new(Some(responder)));
483
484 debug!(
486 "handle_request: scheduling alarm_id: \"{}\"\n\tconn_id: {:?}\n\tdeadline: {}",
487 alarm_id,
488 conn_id,
489 format_timer(deadline.into())
490 );
491 let deadline = timers::Deadline::Boot(deadline.into());
493 if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
494 conn_id,
495 deadline,
496 mode: Some(mode),
497 alarm_id: alarm_id.clone(),
498 responder: responder.clone(),
499 })) {
500 warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
501 responder
502 .borrow_mut()
503 .take()
504 .expect("always present if call fails")
505 .send(Err(fta::WakeAlarmsError::Internal))
506 .unwrap();
507 }
508 }
509 fta::WakeAlarmsRequest::SetAndWaitUtc { deadline, mode, alarm_id, responder } => {
510 let deadline =
512 timers::Deadline::Utc(fxr::UtcInstant::from_nanos(deadline.timestamp_utc));
513
514 let responder = Rc::new(RefCell::new(Some(responder)));
517 debug!(
518 "handle_request: scheduling alarm_id UTC: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {deadline}",
519 );
520
521 if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
522 conn_id,
523 deadline,
524 mode: Some(mode),
525 alarm_id: alarm_id.clone(),
526 responder: responder.clone(),
527 })) {
528 warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
529 responder
530 .borrow_mut()
531 .take()
532 .expect("always present if call fails")
533 .send(Err(fta::WakeAlarmsError::Internal))
534 .unwrap();
535 }
536 }
537 fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
538 log_long_op!(handle_cancel(alarm_id, conn_id, &mut cmd));
541 }
542 fta::WakeAlarmsRequest::Set { notifier, deadline, mode, alarm_id, responder } => {
543 debug!(
545 "handle_request: scheduling alarm_id: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {}",
546 format_timer(deadline.into())
547 );
548 if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
550 conn_id,
551 deadline: timers::Deadline::Boot(deadline.into()),
552 mode: Some(mode),
553 alarm_id: alarm_id.clone(),
554 responder: Rc::new(RefCell::new(Some(notifier))),
555 })) {
556 warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
557 responder.send(Err(fta::WakeAlarmsError::Internal)).unwrap();
558 } else {
559 responder.send(Ok(())).unwrap();
561 }
562 }
563 fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
564 };
565}
566
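/// Handle to the wake timer loop: holds the sending end of the command channel that
/// [serve] uses to schedule and cancel alarms.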
567pub struct Loop {
574 snd: mpsc::Sender<Cmd>,
577}
578
579impl Loop {
580 pub fn new(
595 scope: fasync::ScopeHandle,
596 device_proxy: ffhh::DeviceProxy,
597 inspect: finspect::Node,
598 utc_clock: fxr::UtcClock,
599 ) -> Self {
600 let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
601 Loop::new_internal(scope, hw_device_timer_ops, inspect, utc_clock)
602 }
603
604 pub fn new_emulated(
618 scope: fasync::ScopeHandle,
619 inspect: finspect::Node,
620 utc_clock: fxr::UtcClock,
621 ) -> Self {
622 let timer_ops = Box::new(EmulationTimerOps::new());
623 Loop::new_internal(scope, timer_ops, inspect, utc_clock)
624 }
625
626 fn new_internal(
627 scope: fasync::ScopeHandle,
628 timer_ops: Box<dyn TimerOps>,
629 inspect: finspect::Node,
630 utc_clock: fxr::UtcClock,
631 ) -> Self {
632 let utc_transform = Rc::new(RefCell::new(
633 utc_clock.get_details().expect("has UTC clock READ capability").reference_to_synthetic,
634 ));
635
636 let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
637 let loop_scope = scope.clone();
638
639 scope.spawn_local(wake_timer_loop(
640 loop_scope,
641 snd.clone(),
642 rcv,
643 timer_ops,
644 inspect,
645 utc_transform,
646 ));
647 scope.spawn_local(monitor_utc_clock_changes(utc_clock, snd.clone()));
648 Self { snd }
649 }
650
651 fn get_sender(&self) -> mpsc::Sender<Cmd> {
654 self.snd.clone()
655 }
656}
657
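/// Waits for `CLOCK_UPDATED` signals on the UTC clock and forwards each new
/// reference-to-synthetic transform to the timer loop as [Cmd::UtcUpdated].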
658async fn monitor_utc_clock_changes(utc_clock: fxr::UtcClock, mut cmd: mpsc::Sender<Cmd>) {
661 let koid = utc_clock.as_handle_ref().get_koid();
662 log::info!("monitor_utc_clock_changes: entry");
663 loop {
664 fasync::OnSignals::new(utc_clock.as_handle_ref(), zx::Signals::CLOCK_UPDATED)
666 .await
667 .expect("UTC clock is readable");
668
669 let transform =
670 utc_clock.get_details().expect("UTC clock details are readable").reference_to_synthetic;
671 log::debug!("Received a UTC update: koid={koid:?}: {transform:?}");
672 if let Err(err) = cmd.send(Cmd::UtcUpdated { transform }).await {
673 log::warn!("monitor_utc_clock_changes: exit: {err:?}");
675 break;
676 }
677 }
678}
679
680pub fn clone_handle<H: HandleBased>(handle: &H) -> H {
692 handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
693}
694
695async fn wait_signaled<H: HandleBased>(handle: &H) {
696 fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
697}
698
699pub(crate) fn signal<H: HandleBased>(handle: &H) {
700 handle.signal_handle(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
701}
702
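/// A duration expressed as a tick count at a specific timer resolution; the total
/// duration is `resolution * ticks`.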
703#[derive(Debug, Clone, Copy)]
711struct TimerDuration {
712 resolution: zx::BootDuration,
714 ticks: u64,
717}
718
719impl Eq for TimerDuration {}
722
723impl std::cmp::PartialOrd for TimerDuration {
724 fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
725 Some(self.cmp(other))
726 }
727}
728
729impl std::cmp::PartialEq for TimerDuration {
730 fn eq(&self, other: &Self) -> bool {
731 self.cmp(other) == std::cmp::Ordering::Equal
732 }
733}
734
735impl std::cmp::Ord for TimerDuration {
736 fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
739 let self_ticks_128: i128 = self.ticks as i128;
740 let self_resolution: i128 = self.resolution_as_nanos() as i128;
741 let self_nanos = self_resolution * self_ticks_128;
742
743 let other_ticks_128: i128 = other.ticks as i128;
744 let other_resolution: i128 = other.resolution_as_nanos() as i128;
745 let other_nanos = other_resolution * other_ticks_128;
746
747 self_nanos.cmp(&other_nanos)
748 }
749}
750
751impl std::fmt::Display for TimerDuration {
752 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
756 let ticks = self.ticks;
757 let resolution = self.resolution();
758 write!(f, "{}x{}", ticks, format_duration(resolution),)
760 }
761}
762
763impl TimerDuration {
764 fn max() -> Self {
766 TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
767 }
768
769 fn zero() -> Self {
771 TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
772 }
773
774 fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
776 Self { resolution, ticks }
777 }
778
779 fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
782 Self::new(res_source.resolution, ticks)
783 }
784
785 fn duration(&self) -> zx::BootDuration {
790 let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
791 let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
792 zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
793 }
794
795 fn resolution(&self) -> zx::BootDuration {
797 self.resolution
798 }
799
800 fn resolution_as_nanos(&self) -> u64 {
801 self.resolution().into_nanos().try_into().expect("resolution is never negative")
802 }
803
804 fn ticks(&self) -> u64 {
806 self.ticks
807 }
808}
809
810impl From<zx::BootDuration> for TimerDuration {
811 fn from(d: zx::BootDuration) -> TimerDuration {
812 let nanos = d.into_nanos();
813 assert!(nanos >= 0);
814 let nanos_u64 = nanos.try_into().expect("guarded by assert");
815 TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
816 }
817}
818
819impl std::ops::Div for TimerDuration {
820 type Output = u64;
821 fn div(self, rhs: Self) -> Self::Output {
822 let self_nanos = self.resolution_as_nanos() * self.ticks;
823 let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
824 self_nanos / rhs_nanos
825 }
826}
827
828impl std::ops::Mul<u64> for TimerDuration {
829 type Output = Self;
830 fn mul(self, rhs: u64) -> Self::Output {
831 Self::new(self.resolution, self.ticks * rhs)
832 }
833}
834
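/// Configuration of a single hardware timer: its supported resolutions, maximum tick
/// count, and driver-assigned ID.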
835#[derive(Debug)]
837pub(crate) struct TimerConfig {
838 resolutions: Vec<zx::BootDuration>,
846 max_ticks: u64,
851 id: u64,
853}
854
855impl TimerConfig {
856 fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
859 debug!(
860 "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
861 resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
862 max_ticks,
863 timer_id
864 );
865 let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
866 TimerConfig { resolutions, max_ticks, id: timer_id }
867 }
868
869 fn new_empty() -> Self {
870 error!("TimerConfig::new_empty() called, this is not OK.");
871 TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
872 }
873
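    /// Picks the resolution and tick count that most closely approximate `duration`,
    /// given this timer's supported resolutions and maximum tick count.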
874 fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
884 assert!(self.resolutions.len() > 0, "there must be at least one supported resolution");
885
886 if duration <= zx::BootDuration::ZERO {
889 return TimerDuration::new(self.resolutions[0], 1);
890 }
891
892 let mut best_positive_slack = TimerDuration::zero();
899 let mut best_negative_slack = TimerDuration::max();
900
901 if self.max_ticks == 0 {
902 return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
903 }
904 let duration_slack: TimerDuration = duration.into();
905
906 for res1 in self.resolutions.iter() {
907 let smallest_unit = TimerDuration::new(*res1, 1);
908 let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
909
910 let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
911 let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
912
913 if smallest_slack_larger_than_duration {
914 if duration_slack == TimerDuration::zero() {
915 best_negative_slack = TimerDuration::zero();
916 } else if smallest_unit < best_negative_slack {
917 best_negative_slack = smallest_unit;
918 }
919 }
920 if largest_slack_smaller_than_duration {
921 if max_tick_at_res > best_positive_slack
922 || best_positive_slack == TimerDuration::zero()
923 {
924 best_positive_slack = max_tick_at_res;
925 }
926 }
927
928 if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
930 let q = duration_slack / smallest_unit;
933 let d = smallest_unit * q;
934 if d == duration_slack {
935 return d;
937 } else {
938 if d > best_positive_slack {
940 best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
941 }
942 let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
943 if d_plus < best_negative_slack {
944 best_negative_slack = d_plus;
945 }
946 }
947 }
948 }
949
950 let p_slack = duration - best_positive_slack.duration();
951 let n_slack = best_negative_slack.duration() - duration;
952
953 let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
958 best_positive_slack
959 } else {
960 best_negative_slack
961 };
962 debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
963 assert!(
964 ret.duration().into_nanos() >= 0,
965 "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
966 ret,
967 format_duration(p_slack),
968 format_duration(n_slack),
969 format_duration(duration),
970 best_positive_slack,
971 best_negative_slack,
972 duration_slack,
973 p_slack != zx::BootDuration::ZERO,
974 p_slack,
975 zx::BootDuration::ZERO,
976 );
977 ret
978 }
979}
980
981async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
982 debug!("get_timer_properties: requesting timer properties.");
983 hrtimer.get_timer_properties().await
984}
985
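/// State of the currently programmed hardware timer: the task waiting on its expiry and
/// the deadline it was programmed for.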
986struct TimerState {
988 task: fasync::Task<()>,
990 deadline: fasync::BootInstant,
992}
993
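/// The main wake alarm loop: maintains the heap of pending timers, programs the hardware
/// timer for the earliest deadline, notifies clients when alarms expire, and records
/// diagnostics in Inspect.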
994async fn wake_timer_loop(
1003 scope: fasync::ScopeHandle,
1004 snd: mpsc::Sender<Cmd>,
1005 mut cmds: mpsc::Receiver<Cmd>,
1006 timer_proxy: Box<dyn TimerOps>,
1007 inspect: finspect::Node,
1008 utc_transform: Rc<RefCell<fxr::UtcClockTransform>>,
1009) {
1010 debug!("wake_timer_loop: started");
1011
1012 let mut timers = timers::Heap::new(utc_transform.clone());
1013 let timer_config = get_timer_properties(&timer_proxy).await;
1014
1015 #[allow(clippy::collection_is_never_read)]
1018 let mut hrtimer_status: Option<TimerState> = None;
1019
1020 let now_prop = inspect.create_int("now_ns", 0);
1027 let now_formatted_prop = inspect.create_string("now_formatted", "");
1028 let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1029 let pending_timers_prop = inspect.create_string("pending_timers", "");
1030 let _deadline_histogram_prop = inspect.create_int_exponential_histogram(
1031 "requested_deadlines_ns",
1032 finspect::ExponentialHistogramParams {
1033 floor: 0,
1034 initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1035 step_multiplier: 10,
1037 buckets: 16,
1038 },
1039 );
1040 let slack_histogram_prop = inspect.create_int_exponential_histogram(
1041 "slack_ns",
1042 finspect::ExponentialHistogramParams {
1043 floor: 0,
1044 initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1045 step_multiplier: 10,
1046 buckets: 16,
1047 },
1048 );
1049 let schedule_delay_prop = inspect.create_int_exponential_histogram(
1050 "schedule_delay_ns",
1051 finspect::ExponentialHistogramParams {
1052 floor: 0,
1053 initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1054 step_multiplier: 10,
1055 buckets: 16,
1056 },
1057 );
1058 let boot_deadlines_count_prop = inspect.create_uint("boot_deadlines_count", 0);
1059 let utc_deadlines_count_prop = inspect.create_uint("utc_deadlines_count", 0);
1060 let hw_node = inspect.create_child("hardware");
1062 let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1063 let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1064
1065 let debug_node = inspect.create_child("debug_node");
1067 let start_notify_setup_count = debug_node.create_int("start_notify_setup", 0);
1068 let start_count = debug_node.create_int("start_count", 0);
1069 let responder_count = debug_node.create_int("responder_count", 0);
1070 let stop_count = debug_node.create_int("stop", 0);
1071 let stop_responder_count = debug_node.create_int("stop_responder", 0);
1072 let stop_hrtimer_count = debug_node.create_int("stop_hrtimer", 0);
1073 let schedule_hrtimer_count = debug_node.create_int("schedule_hrtimer", 0);
1074 let alarm_count = debug_node.create_int("alarm", 0);
1075 let alarm_fidl_count = debug_node.create_int("alarm_fidl", 0);
1076 let alarm_driver_count = debug_node.create_int("alarm_driver", 0);
1077 let utc_update_count = debug_node.create_int("utc_update", 0);
1078 let status_count = debug_node.create_int("status", 0);
1079 let loop_count = debug_node.create_int("loop_count", 0);
1080
1081 let hrtimer_node = debug_node.create_child("hrtimer");
1082
1083 while let Some(cmd) = cmds.next().await {
1084 let _i = ScopedInc::new(&loop_count);
1085 trace::duration!(c"alarms", c"Cmd");
1086 let now = fasync::BootInstant::now();
1088 now_prop.set(now.into_nanos());
1089 trace::instant!(c"alarms", c"wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1090 match cmd {
1091 Cmd::Start { conn_id, deadline, mode, alarm_id, responder } => {
1092 let _i = ScopedInc::new(&start_count);
1093 trace::duration!(c"alarms", c"Cmd::Start");
1094 fuchsia_trace::flow_step!(
1095 c"alarms",
1096 c"hrtimer_lifecycle",
1097 timers::get_trace_id(&alarm_id)
1098 );
1099 debug!(
1101 "wake_timer_loop: START alarm_id: \"{}\", conn_id: {:?}\n\tdeadline: {}\n\tnow: {}",
1102 alarm_id,
1103 conn_id,
1104 deadline,
1105 format_timer(now.into()),
1106 );
1107
1108 defer! {
1109 let _i = ScopedInc::new(&start_notify_setup_count);
1110 if let Some(mode) = mode {
1112 if let fta::SetMode::NotifySetupDone(setup_done) = mode {
1113 signal(&setup_done);
1115 debug!("wake_timer_loop: START: setup_done signaled");
1116 };
1117 }
1118 }
1119 let deadline_boot = deadline.as_boot(&*utc_transform.borrow());
1120
1121 match deadline {
1125 timers::Deadline::Boot(_) => boot_deadlines_count_prop.add(1),
1126 timers::Deadline::Utc(_) => utc_deadlines_count_prop.add(1),
1127 };
1128
1129 if timers::Heap::expired(now, deadline_boot) {
1130 trace::duration!(c"alarms", c"Cmd::Start:immediate");
1131 fuchsia_trace::flow_step!(
1132 c"alarms",
1133 c"hrtimer_lifecycle",
1134 timers::get_trace_id(&alarm_id)
1135 );
1136 let (_lease, keep_alive) = zx::EventPair::create();
1138 debug!(
1139 "[{}] wake_timer_loop: bogus lease {:?}",
1140 line!(),
1141 &keep_alive.get_koid().unwrap()
1142 );
1143
1144 {
1145 let _i1 = ScopedInc::new(&responder_count);
1146 if let Err(e) = responder
1147 .send(&alarm_id, Ok(keep_alive))
1148 .expect("responder is always present")
1149 {
1150 error!(
1151 "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: could not notify, dropping: {e}",
1152 );
1153 } else {
1154 debug!(
1155 "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: EXPIRED IMMEDIATELY\n\tdeadline({}) <= now({})\n\tfull deadline: {}",
1156 format_timer(deadline_boot.into()),
1157 format_timer(now.into()),
1158 deadline,
1159 )
1160 }
1161 }
1162 } else {
1163 trace::duration!(c"alarms", c"Cmd::Start:regular");
1164 fuchsia_trace::flow_step!(
1165 c"alarms",
1166 c"hrtimer_lifecycle",
1167 timers::get_trace_id(&alarm_id)
1168 );
1169 let was_empty = timers.is_empty();
1171
1172 let deadline_before = timers.peek_deadline_as_boot();
1173 let node = match deadline {
1174 timers::Deadline::Boot(_) => {
1175 timers.new_node_boot(deadline_boot, alarm_id, conn_id, responder)
1176 }
1177 timers::Deadline::Utc(d) => {
1178 timers.new_node_utc(d, alarm_id, conn_id, responder)
1179 }
1180 };
1181 timers.push(node);
1182 let deadline_after = timers.peek_deadline_as_boot();
1183
1184 let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1185 let needs_cancel = !was_empty && deadline_changed;
1186 let needs_reschedule = was_empty || deadline_changed;
1187
1188 if needs_reschedule {
1189 let schedulable_deadline = deadline_after.unwrap_or(deadline_boot);
1191 if needs_cancel {
1192 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1193 }
1194 hrtimer_status = Some(log_long_op!(schedule_hrtimer(
1195 scope.clone(),
1196 now,
1197 &timer_proxy,
1198 schedulable_deadline,
1199 snd.clone(),
1200 &timer_config,
1201 &schedule_delay_prop,
1202 &hrtimer_node,
1203 )));
1204 }
1205 }
1206 }
1207 Cmd::StopById { timer_id, done } => {
1208 let _i = ScopedInc::new(&stop_count);
1209 defer! {
1210 signal(&done);
1211 }
1212 trace::duration!(c"alarms", c"Cmd::StopById", "alarm_id" => timer_id.alarm());
1213 fuchsia_trace::flow_step!(
1214 c"alarms",
1215 c"hrtimer_lifecycle",
1216 timers::get_trace_id(&timer_id.alarm())
1217 );
1218 debug!("wake_timer_loop: STOP timer: {}", timer_id);
1219 let deadline_before = timers.peek_deadline_as_boot();
1220
1221 if let Some(timer_node) = timers.remove_by_id(&timer_id) {
1222 let deadline_after = timers.peek_deadline_as_boot();
1223
1224 {
1225 let _i = ScopedInc::new(&stop_responder_count);
1226 if let Some(res) = timer_node
1227 .get_responder()
1228 .send(timer_node.id().alarm(), Err(fta::WakeAlarmsError::Dropped))
1229 {
1230 res.expect("infallible");
1232 }
1233 }
1234 if is_deadline_changed(deadline_before, deadline_after) {
1235 let _i = ScopedInc::new(&stop_hrtimer_count);
1236 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1237 }
1238 if let Some(deadline) = deadline_after {
1239 let _i = ScopedInc::new(&schedule_hrtimer_count);
1240 let new_timer_state = log_long_op!(schedule_hrtimer(
1243 scope.clone(),
1244 now,
1245 &timer_proxy,
1246 deadline,
1247 snd.clone(),
1248 &timer_config,
1249 &schedule_delay_prop,
1250 &hrtimer_node,
1251 ));
1252 let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1253 if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1254 log_long_op!(task);
1258 }
1259 } else {
1260 hrtimer_status = None;
1262 }
1263 } else {
1264 debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1265 }
1266 }
1267 Cmd::Alarm { expired_deadline, keep_alive } => {
1268 let _i = ScopedInc::new(&alarm_count);
1269
1270 trace::duration!(c"alarms", c"Cmd::Alarm");
1271 debug!(
1276 "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1277 format_timer(expired_deadline.into()),
1278 keep_alive.get_koid().unwrap(),
1279 );
1280 let expired_count =
1281 notify_all(&mut timers, &keep_alive, now, &slack_histogram_prop)
1282 .expect("notification succeeds");
1283 if expired_count == 0 {
1284 debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1287 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1288 }
1289 hrtimer_status = match timers.peek_deadline_as_boot() {
1291 None => None,
1292 Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1293 scope.clone(),
1294 now,
1295 &timer_proxy,
1296 deadline,
1297 snd.clone(),
1298 &timer_config,
1299 &schedule_delay_prop,
1300 &hrtimer_node,
1301 ))),
1302 }
1303 }
1304 Cmd::AlarmFidlError { expired_deadline, error } => {
1305 let _i = ScopedInc::new(&alarm_fidl_count);
1306
1307 trace::duration!(c"alarms", c"Cmd::AlarmFidlError");
1308 warn!(
1312 "wake_timer_loop: FIDL error: {:?}, deadline: {}, now: {}",
1313 error,
1314 format_timer(expired_deadline.into()),
1315 format_timer(now.into()),
1316 );
1317 let (_dummy_lease, peer) = zx::EventPair::create();
1320 debug!(
1321 "bogus lease: {:?} fidl error [{}:{}]",
1322 &peer.get_koid().unwrap(),
1323 file!(),
1324 line!()
1325 );
1326 notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1327 .expect("notification succeeds");
1328 hrtimer_status = match timers.peek_deadline_as_boot() {
                    None => None,
                    Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1331 scope.clone(),
1332 now,
1333 &timer_proxy,
1334 deadline,
1335 snd.clone(),
1336 &timer_config,
1337 &schedule_delay_prop,
1338 &hrtimer_node,
1339 ))),
1340 }
1341 }
1342 Cmd::AlarmDriverError {
1343 expired_deadline,
1344 error,
1345 timer_config_id,
1346 resolution_nanos,
1347 ticks,
1348 } => {
1349 let _i = ScopedInc::new(&alarm_driver_count);
1350
1351 trace::duration!(c"alarms", c"Cmd::AlarmDriverError");
1352 let (_dummy_lease, peer) = zx::EventPair::create();
1353 debug!(
1354 "bogus lease: {:?} driver error. [{}:{}]",
1355 &peer.get_koid().unwrap(),
1356 file!(),
1357 line!()
1358 );
1359 notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1360 .expect("notification succeeds");
1361 match error {
1362 fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1363 debug!(
1365 "wake_timer_loop: CANCELED timer at deadline: {}",
1366 format_timer(expired_deadline.into())
1367 );
1368 }
1369 _ => {
1370 error!(
1371 "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}\n\ttimer_id={}\n\tresolution={}\n\tticks={}",
1372 error,
1373 format_timer(expired_deadline.into()),
1374 format_timer(now.into()),
1375 timer_config_id,
1376 resolution_nanos,
1377 ticks,
1378 );
1379 hrtimer_status = match timers.peek_deadline_as_boot() {
1383 None => None,
1384 Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1385 scope.clone(),
1386 now,
1387 &timer_proxy,
1388 deadline,
1389 snd.clone(),
1390 &timer_config,
1391 &schedule_delay_prop,
1392 &hrtimer_node,
1393 ))),
1394 }
1395 }
1396 }
1397 }
1398 Cmd::UtcUpdated { transform } => {
1399 let _i = ScopedInc::new(&utc_update_count);
1400
1401 trace::duration!(c"alarms", c"Cmd::UtcUpdated");
1402 debug!("wake_timer_loop: applying new clock transform: {transform:?}");
1403
1404 *utc_transform.borrow_mut() = transform;
1407
1408 if hrtimer_status.is_some() {
1411 log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1412 hrtimer_status = match timers.peek_deadline_as_boot() {
1414 None => None,
1415 Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1416 scope.clone(),
1417 now,
1418 &timer_proxy,
1419 deadline,
1420 snd.clone(),
1421 &timer_config,
1422 &schedule_delay_prop,
1423 &hrtimer_node,
1424 ))),
1425 }
1426 }
1427 }
1428 }
1429
1430 {
1431 let _i = ScopedInc::new(&status_count);
1432
1433 trace::duration!(c"timekeeper", c"inspect");
1438 let now_formatted = format_timer(now.into());
1439 debug!("wake_timer_loop: now: {}", &now_formatted);
1440 now_formatted_prop.set(&now_formatted);
1441
1442 let pending_timers_count: u64 =
1443 timers.timer_count().try_into().expect("always convertible");
1444 debug!("wake_timer_loop: currently pending timer count: {}", pending_timers_count);
1445 pending_timers_count_prop.set(pending_timers_count);
1446
1447 let pending_timers = format!("{}", timers);
1448 debug!("wake_timer_loop: currently pending timers: \n\t{}", &timers);
1449 pending_timers_prop.set(&pending_timers);
1450
1451 let current_deadline: String = hrtimer_status
1452 .as_ref()
1453 .map(|s| format!("{}", format_timer(s.deadline.into())))
1454 .unwrap_or_else(|| "(none)".into());
1455 debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
            current_hw_deadline_prop.set(&current_deadline);
1457
1458 let remaining_duration_until_alarm = hrtimer_status
1459 .as_ref()
1460 .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1461 .unwrap_or_else(|| "(none)".into());
1462 debug!(
1463 "wake_timer_loop: remaining duration until alarm: {}",
1464 &remaining_duration_until_alarm
1465 );
1466 remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1467 debug!("---");
1468 }
1469 }
1470
1471 log::info!("wake_timer_loop: exiting. This is only correct in test code.");
1474}
1475
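/// Programs the hardware timer for `deadline`, spawning a task that waits for the driver
/// response and feeds the outcome back into the loop as a [Cmd].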
1476async fn schedule_hrtimer(
1490 scope: fasync::ScopeHandle,
1491 now: fasync::BootInstant,
1492 hrtimer: &Box<dyn TimerOps>,
1493 deadline: fasync::BootInstant,
1494 mut command_send: mpsc::Sender<Cmd>,
1495 timer_config: &TimerConfig,
1496 _schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
1497 debug_node: &finspect::Node,
1498) -> TimerState {
1499 let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
1500 trace::duration!(c"alarms", c"schedule_hrtimer", "timeout" => timeout.into_nanos());
1501 let hrtimer_scheduled = zx::Event::create();
1503
1504 let schedule_count = debug_node.create_int("schedule", 0);
1505 let hrtimer_wait_count = debug_node.create_int("hrtimer_wait", 0);
1506 let wait_signaled_count = debug_node.create_int("wait_signaled", 0);
1507
1508 let _sc = ScopedInc::new(&schedule_count);
1509
1510 debug!(
1511 "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
1512 format_timer(now.into()),
1513 format_timer(deadline.into()),
1514 format_duration(timeout),
1515 );
1516
1517 let slack = timer_config.pick_setting(timeout);
1518 let resolution_nanos = slack.resolution.into_nanos();
1519 let useful_ticks = std::cmp::max(MIN_USEFUL_TICKS, slack.ticks());
1522
1523 trace::instant!(c"alarms", c"hrtimer:programmed",
1524 trace::Scope::Process,
1525 "resolution_ns" => resolution_nanos,
1526 "ticks" => useful_ticks
1527 );
1528 let timer_config_id = timer_config.id;
1529 let start_and_wait_fut = {
1530 let _sc = ScopedInc::new(&hrtimer_wait_count);
1531 hrtimer.start_and_wait(
1532 timer_config.id,
1533 &ffhh::Resolution::Duration(resolution_nanos),
1534 useful_ticks,
1535 clone_handle(&hrtimer_scheduled),
1536 )
1537 };
1538
1539 let hrtimer_scheduled_if_error = clone_handle(&hrtimer_scheduled);
1540 let hrtimer_task = scope.spawn_local(async move {
1541 debug!("hrtimer_task: waiting for hrtimer driver response");
1542 trace::instant!(c"alarms", c"hrtimer:started", trace::Scope::Process);
1543 let response = start_and_wait_fut.await;
1544 trace::instant!(c"alarms", c"hrtimer:response", trace::Scope::Process);
1545 match response {
1546 Err(TimerOpsError::Fidl(e)) => {
1547 defer! {
1548 signal(&hrtimer_scheduled_if_error);
1550 }
1551 trace::instant!(c"alarms", c"hrtimer:response:fidl_error", trace::Scope::Process);
1552 warn!("hrtimer_task: hrtimer FIDL error: {:?}", e);
1553 command_send
1554 .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
1555 .unwrap();
1556 }
1558 Err(TimerOpsError::Driver(e)) => {
1559 defer! {
1560 signal(&hrtimer_scheduled_if_error);
1563 }
1564 let driver_error_str = format!("{:?}", e);
1565 trace::instant!(c"alarms", c"hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
1566 debug!("schedule_hrtimer: hrtimer driver error: {:?}", e);
1569 command_send
1570 .start_send(Cmd::AlarmDriverError {
1571 expired_deadline: now,
1572 error: e,
1573 timer_config_id,
1574 resolution_nanos,
1575 ticks: useful_ticks,
1576 })
1577 .unwrap();
1578 }
1580 Ok(keep_alive) => {
1581 trace::instant!(c"alarms", c"hrtimer:response:alarm", trace::Scope::Process);
1582 debug!("hrtimer: got alarm response: {:?}", keep_alive);
1583 command_send
1585 .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
1586 .unwrap();
1587 }
1588 }
1589 debug!("hrtimer_task: exiting task.");
1590 trace::instant!(c"alarms", c"hrtimer:task_exit", trace::Scope::Process);
1591 }).into();
1592 debug!("schedule_hrtimer: waiting for event to be signaled");
1593
1594 {
1595 let _i = ScopedInc::new(&wait_signaled_count);
1596 log_long_op!(wait_signaled(&hrtimer_scheduled));
1598 }
1599
1600 let now_after_signaled = fasync::BootInstant::now();
1601 let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
1602 if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
1603 trace::duration!(c"alarms", c"schedule_hrtimer:unusual_duration",
1604 "duration" => duration_until_scheduled.into_nanos());
1605 warn!(
1606 "unusual duration until hrtimer scheduled: {}",
1607 format_duration(duration_until_scheduled)
1608 );
1609 }
1610 debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
1613 TimerState { task: hrtimer_task, deadline }
1614}
1615
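/// Expires and notifies every timer whose deadline is at or before `reference_instant`,
/// passing each responder a duplicate of `lease_prototype`; returns the number of timers
/// expired.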
1616fn notify_all(
1626 timers: &mut timers::Heap,
1627 lease_prototype: &zx::EventPair,
1628 reference_instant: fasync::BootInstant,
1629 _unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1630) -> Result<usize> {
1631 trace::duration!(c"alarms", c"notify_all");
1632 let now = fasync::BootInstant::now();
1633 let mut expired = 0;
1634 while let Some(timer_node) = timers.maybe_expire_earliest(reference_instant) {
1635 expired += 1;
1636 let deadline = timer_node.get_boot_deadline();
1638 let alarm_id = timer_node.id().alarm().to_string();
1639 trace::duration!(c"alarms", c"notify_all:notified", "alarm_id" => &*alarm_id);
1640 fuchsia_trace::flow_step!(c"alarms", c"hrtimer_lifecycle", timers::get_trace_id(&alarm_id));
1641 let conn_id = timer_node.id().conn.clone();
1642 let slack: zx::BootDuration = deadline - now;
1643 if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1644 trace::duration!(c"alarms", c"schedule_hrtimer:unusual_slack", "slack" => slack.into_nanos());
1645 warn!(
1647 "alarm id: {} had an unusually large slack: {}",
1648 alarm_id,
1649 format_duration(slack)
1650 );
1651 }
1652 if slack < zx::BootDuration::ZERO {
1653 }
1656 debug!(
1657 concat!(
1658 "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tconn_id: {:?},\n\t",
1659 "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1660 ),
1661 alarm_id,
1662 format_timer(deadline.into()),
1663 conn_id,
1664 format_timer(reference_instant.into()),
1665 format_timer(now.into()),
1666 format_duration(slack),
1667 );
1668 let lease = clone_handle(lease_prototype);
1669 trace::instant!(c"alarms", c"notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "conn_id" => conn_id);
1670 if let Some(Err(e)) = timer_node.get_responder().send(timer_node.id().alarm(), Ok(lease)) {
1671 error!("could not signal responder: {:?}", e);
1672 }
1673 trace::instant!(c"alarms", c"notified", trace::Scope::Process);
1674 }
1675 trace::instant!(c"alarms", c"notify", trace::Scope::Process, "expired_count" => expired);
1676 debug!("notify_all: expired count: {}", expired);
1677 Ok(expired)
1678 }
1680
1681const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1684
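/// Waits for a device to appear under [HRTIMER_DIRECTORY] and connects to its `Device`
/// protocol.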
1685pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1694 debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1695 let dir =
1696 fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1697 .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1698 let path = device_watcher::watch_for_files(&dir)
1699 .await
1700 .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1701 .try_next()
1702 .await
1703 .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1704 let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1705 let path = path
1706 .to_str()
1707 .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1708 connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1709 .context("Failed to connect built-in service")
1710}
1711
1712#[cfg(test)]
1713mod tests {
1714 use super::*;
1715 use assert_matches::assert_matches;
1716 use diagnostics_assertions::{AnyProperty, assert_data_tree};
1717 use fuchsia_async::TestExecutor;
1718 use futures::select;
1719 use test_case::test_case;
1720 use test_util::{assert_gt, assert_lt};
1721
1722 fn fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1723 let (_lease, peer) = zx::EventPair::create();
1724 peer
1725 }
1726
1727 #[test]
1728 fn timer_duration_no_overflow() {
1729 let duration1 = TimerDuration {
1730 resolution: zx::BootDuration::from_seconds(100_000_000),
1731 ticks: u64::MAX,
1732 };
1733 let duration2 = TimerDuration {
1734 resolution: zx::BootDuration::from_seconds(110_000_000),
1735 ticks: u64::MAX,
1736 };
1737 assert_eq!(duration1, duration1);
1738 assert_eq!(duration2, duration2);
1739
1740 assert_lt!(duration1, duration2);
1741 assert_gt!(duration2, duration1);
1742 }
1743
1744 #[test_case(
1745 TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1746 TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1747 )]
1748 #[test_case(
1749 TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
1750 TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1751 )]
1752 #[test_case(
1753 TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1754 TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
1755 )]
1756 #[test_case(
1757 TimerDuration::new(zx::BootDuration::from_micros(1), 1),
1758 TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
1759 )]
1760 fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
1761 assert_eq!(one, other);
1762 }
1763
1764 #[test_case(
1765 TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1766 TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
1767 )]
1768 #[test_case(
1769 TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1770 TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1771 )]
1772 fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
1773 assert_lt!(one, other);
1774 }
1775
1776 #[test_case(
1777 TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
1778 TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1779 )]
1780 #[test_case(
1781 TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1782 TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1783 )]
1784 fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
1785 assert_gt!(one, other);
1786 }
1787
1788 #[test_case(
1789 vec![zx::BootDuration::from_nanos(1)],
1790 100,
1791 zx::BootDuration::from_nanos(0),
1792 TimerDuration::new(zx::BootDuration::from_nanos(1), 1) ; "0ns becomes 1ns"
1793 )]
1794 #[test_case(
1795 vec![zx::BootDuration::from_nanos(1)],
1796 100,
1797 zx::BootDuration::from_nanos(50),
1798 TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
1799 )]
1800 #[test_case(
1801 vec![zx::BootDuration::from_nanos(2)],
1802 100,
1803 zx::BootDuration::from_nanos(50),
1804 TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
1805 )]
1806 #[test_case(
1807 vec![zx::BootDuration::from_nanos(3)],
1808 100,
1809 zx::BootDuration::from_nanos(50),
1810 TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
1812 )]
1813 #[test_case(
1814 vec![
1815 zx::BootDuration::from_nanos(3),
1816 zx::BootDuration::from_nanos(4)
1817 ],
1818 100,
1819 zx::BootDuration::from_nanos(50),
1820 TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
1821 )]
1822 #[test_case(
1823 vec![
1824 zx::BootDuration::from_nanos(1000),
1825 ],
1826 100,
1827 zx::BootDuration::from_nanos(50),
1828 TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
1829 "950ns negative slack is the best we can do"
1830 )]
1831 #[test_case(
1832 vec![
1833 zx::BootDuration::from_nanos(1),
1834 ],
1835 10,
1836 zx::BootDuration::from_nanos(50),
1837 TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
1838 "10ns positive slack is the best we can do"
1839 )]
1840 #[test_case(
1841 vec![
1842 zx::BootDuration::from_millis(1),
1843 zx::BootDuration::from_micros(100),
1844 zx::BootDuration::from_micros(10),
1845 zx::BootDuration::from_micros(1),
1846 ],
1847 20, zx::BootDuration::from_micros(150),
1849 TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
1850 "Realistic case with resolutions from driver, should be 15us"
1851 )]
1852 #[test_case(
1853 vec![
1854 zx::BootDuration::from_millis(1),
1855 zx::BootDuration::from_micros(100),
1856 zx::BootDuration::from_micros(10),
1857 zx::BootDuration::from_micros(1),
1858 ],
1859 2000, zx::BootDuration::from_micros(6000),
1861 TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
1862 "Coarser exact unit wins"
1863 )]
1864 #[test_case(
1865 vec![
1866 zx::BootDuration::from_millis(1),
1867 zx::BootDuration::from_millis(10),
1868 zx::BootDuration::from_millis(100),
1869 ],
1870 1000,
1871 zx::BootDuration::from_micros(-10),
1872 TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
1873 "Negative duration gets the smallest timer duration"
1874 )]
1875 #[test_case(
1876 vec![
1877 zx::BootDuration::from_millis(1),
1878 zx::BootDuration::from_millis(10),
1879 zx::BootDuration::from_millis(100),
1880 ],
1881 1000,
1882 zx::BootDuration::ZERO,
1883 TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
1884 "Zero duration gets the smallest timer duration"
1885 )]
1886 fn test_pick_setting(
1887 resolutions: Vec<zx::BootDuration>,
1888 max_ticks: u64,
1889 duration: zx::BootDuration,
1890 expected: TimerDuration,
1891 ) {
1892 let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
1893 let actual = config.pick_setting(duration);
1894
1895 assert_slack_eq(expected, actual);
1898 }
1899
1900 fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
1902 let slack = expected.duration() - actual.duration();
1903 assert_eq!(
1904 actual.resolution(),
1905 expected.resolution(),
1906 "\n\texpected: {} ({})\n\tactual : {} ({})\n\tslack: expected-actual={}",
1907 expected,
1908 format_duration(expected.duration()),
1909 actual,
1910 format_duration(actual.duration()),
1911 format_duration(slack)
1912 );
1913 assert_eq!(
1914 actual.ticks(),
1915 expected.ticks(),
1916 "\n\texpected: {} ({})\n\tactual : {} ({})\n\tslack: expected-actual={}",
1917 expected,
1918 format_duration(expected.duration()),
1919 actual,
1920 format_duration(actual.duration()),
1921 format_duration(slack)
1922 );
1923 }
1924
1925 #[derive(Debug)]
1926 enum FakeCmd {
1927 SetProperties {
1928 resolutions: Vec<zx::BootDuration>,
1929 max_ticks: i64,
1930 keep_alive: zx::EventPair,
1931 done: zx::Event,
1932 },
1933 }
1934
1935 use std::cell::RefCell;
1936 use std::rc::Rc;
1937
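    /// Serves a fake hrtimer device for tests; its reported properties are configured via
    /// [FakeCmd] messages and started timers expire on the test executor's clock.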
1938 fn fake_hrtimer_connection(
1944 scope: fasync::ScopeHandle,
1945 rcv: mpsc::Receiver<FakeCmd>,
1946 ) -> ffhh::DeviceProxy {
1947 debug!("fake_hrtimer_connection: entry.");
1948 let (hrtimer, mut stream) =
1949 fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
1950 scope.clone().spawn_local(async move {
1951 let mut rcv = rcv.fuse();
1952 let timer_properties = Rc::new(RefCell::new(None));
1953 let wake_lease = Rc::new(RefCell::new(None));
1954
1955 let timer_running = Rc::new(RefCell::new(false));
1959
1960 loop {
1961 let timer_properties = timer_properties.clone();
1962 let wake_lease = wake_lease.clone();
1963 select! {
1964 cmd = rcv.next() => {
1965 debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
1966 match cmd {
1967 Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
1968 let mut timer_props = vec![];
1969 for v in 0..10 {
1970 timer_props.push(ffhh::TimerProperties {
1971 supported_resolutions: Some(
1972 resolutions.iter()
1973 .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
1974 max_ticks: Some(max_ticks.try_into().unwrap()),
1975 supports_wait: Some(true),
1977 id: Some(v),
1978 ..Default::default()
1979 },
1980 );
1981 }
1982 *timer_properties.borrow_mut() = Some(timer_props);
1983 *wake_lease.borrow_mut() = Some(keep_alive);
1984 debug!("set timer properties to: {:?}", timer_properties);
1985 signal(&done);
1986 }
1987 e => {
1988 panic!("unrecognized command: {:?}", e);
1989 }
1990 }
1991 },
1993 event = stream.next() => {
1994 debug!("fake_hrtimer_connection: event: {:?}", event);
1995 if let Some(Ok(event)) = event {
1996 match event {
1997 ffhh::DeviceRequest::Start { responder, .. } => {
1998 assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1999 *timer_running.borrow_mut() = true;
2000 responder.send(Ok(())).expect("");
2001 }
2002 ffhh::DeviceRequest::Stop { responder, .. } => {
2003 *timer_running.borrow_mut() = false;
2004 responder.send(Ok(())).expect("");
2005 }
2006 ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
2007 responder.send(Ok(1)).expect("");
2008 }
2009 ffhh::DeviceRequest::SetEvent { responder, .. } => {
2010 responder.send(Ok(())).expect("");
2011 }
2012 ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
2013 assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2014 *timer_running.borrow_mut() = true;
2015 debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
2016 let ticks: i64 = ticks.try_into().unwrap();
2017 let sleep_duration = zx::BootDuration::from_nanos(ticks * match resolution {
2018 ffhh::Resolution::Duration(e) => e,
2019 _ => {
2020 error!("resolution has an unexpected value");
2021 1
2022 }
2023 });
2024 let timer_running_clone = timer_running.clone();
2025 scope.spawn_local(async move {
2026 signal(&setup_event);
2029
2030 fasync::Timer::new(sleep_duration).await;
2033 *timer_running_clone.borrow_mut() = false;
2034 responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
2035 debug!("StartAndWait: hrtimer expired");
2036 });
2037 }
2038 ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
2039 assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2040 *timer_running.borrow_mut() = true;
2041 responder.send(Err(ffhh::DriverError::InternalError)).expect("");
2042 }
2043 ffhh::DeviceRequest::GetProperties { responder, .. } => {
2044 if (*timer_properties).borrow().is_none() {
2045 error!("timer_properties is empty, this is not what you want!");
2046 }
2047 responder
2048 .send(ffhh::Properties {
2049 timers_properties: (*timer_properties).borrow().clone(),
2050 ..Default::default()
2051 })
2052 .expect("");
2053 }
2054 ffhh::DeviceRequest::ReadTimer { responder, .. } => {
2055 responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2056 }
2057 ffhh::DeviceRequest::ReadClock { responder, .. } => {
2058 responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2059 }
2060 ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
2061 }
2062 }
2063 },
2064 }
2065 }
2066 });
2067 hrtimer
2068 }
2069
2070 fn clone_utc_clock(orig: &fxr::UtcClock) -> fxr::UtcClock {
2071 orig.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2072 }
2073
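    /// Test fixture bundling a fake hrtimer, a test UTC clock, and a connected
    /// `WakeAlarms` proxy.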
2074 struct TestContext {
2075 wake_proxy: fta::WakeAlarmsProxy,
2076 _scope: fasync::Scope,
2077 _cmd_tx: mpsc::Sender<FakeCmd>,
2078 utc_clock: fxr::UtcClock,
2080 utc_backstop: fxr::UtcInstant,
2081 }
2082
2083 impl TestContext {
2084 async fn new() -> Self {
2085 TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(0)).await;
2086
2087 let scope = fasync::Scope::new();
2088 let utc_backstop = fxr::UtcInstant::from_nanos(1000);
2089 let utc_clock =
2090 fxr::UtcClock::create(zx::ClockOpts::empty(), Some(utc_backstop)).unwrap();
2091 let utc_clone = clone_utc_clock(&utc_clock);
2092 let (mut cmd_tx, wake_proxy) = {
2093 let (tx, rx) = mpsc::channel::<FakeCmd>(0);
2094 let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);
2095
2096 let inspector = finspect::component::inspector();
2097 let alarms = Rc::new(Loop::new(
2098 scope.to_handle(),
2099 hrtimer_proxy,
2100 inspector.root().create_child("test"),
2101 utc_clone,
2102 ));
2103
2104 let (proxy, stream) =
2105 fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2106 scope.spawn_local(async move {
2107 serve(alarms, stream).await;
2108 });
2109 (tx, proxy)
2110 };
2111
2112 let (_wake_lease, peer) = zx::EventPair::create();
2113 let done = zx::Event::create();
2114 cmd_tx
2115 .start_send(FakeCmd::SetProperties {
2116 resolutions: vec![zx::Duration::from_nanos(1)],
2117 max_ticks: 100,
2118 keep_alive: peer,
2119 done: done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2120 })
2121 .unwrap();
2122
2123 assert_matches!(fasync::OnSignals::new(done, zx::Signals::EVENT_SIGNALED).await, Ok(_));
2125
2126 Self { wake_proxy, _scope: scope, _cmd_tx: cmd_tx, utc_clock, utc_backstop }
2127 }
2128 }
2129
2130 impl Drop for TestContext {
2131 fn drop(&mut self) {
2132 assert_matches!(TestExecutor::next_timer(), None, "Unexpected lingering timers");
2133 }
2134 }
2135
    #[fuchsia::test(allow_stalls = false)]
    async fn test_basic_timed_wait() {
        let ctx = TestContext::new().await;

        let deadline = zx::BootInstant::from_nanos(100);
        let setup_done = zx::Event::create();
        let mut set_task = ctx.wake_proxy.set_and_wait(
            deadline.into(),
            fta::SetMode::NotifySetupDone(
                setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
            ),
            "Hello".into(),
        );

        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task).await, Poll::Pending);

        let mut setup_done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut setup_done_task).await,
            Poll::Ready(Ok(_)),
            "Setup event not triggered after scheduling an alarm"
        );

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(100)).await;
        assert_matches!(TestExecutor::poll_until_stalled(set_task).await, Poll::Ready(Ok(Ok(_))));
    }

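    // Same scenario via `Set` and a `Notifier`: the notification must not arrive
    // before the 2ns deadline and must arrive once fake time reaches it.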
    #[fuchsia::test(allow_stalls = false)]
    async fn test_basic_timed_wait_notify() {
        const ALARM_ID: &str = "Hello";
        let ctx = TestContext::new().await;

        let (notifier_client, mut notifier_stream) =
            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
        let setup_done = zx::Event::create();
        assert_matches!(
            ctx.wake_proxy
                .set(
                    notifier_client,
                    fidl::BootInstant::from_nanos(2),
                    fta::SetMode::NotifySetupDone(
                        setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
                    ),
                    ALARM_ID,
                )
                .await,
            Ok(Ok(()))
        );

        let mut done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut done_task).await,
            Poll::Ready(Ok(_)),
            "Setup event not triggered after scheduling an alarm"
        );

        let mut next_task = notifier_stream.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(2)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
    }

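    // Two `SetAndWait` alarms sharing one deadline: both stay pending until the
    // deadline and both complete once fake time reaches it.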
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_same() {
        const DEADLINE_NANOS: i64 = 100;

        let ctx = TestContext::new().await;

        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
            fta::SetMode::KeepAlive(fake_wake_lease()),
            "Hello1".into(),
        );
        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
            fta::SetMode::KeepAlive(fake_wake_lease()),
            "Hello2".into(),
        );

        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;

        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_1).await,
            Poll::Ready(Ok(Ok(_)))
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_2).await,
            Poll::Ready(Ok(Ok(_)))
        );
    }

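    // Notifier-based variant of the previous test: each alarm gets exactly one
    // `Notify` carrying its own ID, after which both notifier streams close.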
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_same_notify() {
        const DEADLINE_NANOS: i64 = 100;
        const ALARM_ID_1: &str = "Hello1";
        const ALARM_ID_2: &str = "Hello2";

        let ctx = TestContext::new().await;

        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        alarm_id,
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        let mut notifier_1 = schedule(DEADLINE_NANOS, ALARM_ID_1).await;
        let mut notifier_2 = schedule(DEADLINE_NANOS, ALARM_ID_2).await;

        let mut next_task_1 = notifier_1.next();
        let mut next_task_2 = notifier_2.next();

        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;

        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_1).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_1
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_2).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_2
        );

        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_1.next()).await,
            Poll::Ready(None)
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(None)
        );
    }

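    // Two `SetAndWait` alarms with different deadlines, scheduled in either order:
    // each call completes exactly when fake time reaches its own deadline.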
    #[test_case(100, 200 ; "push out")]
    #[test_case(200, 100 ; "pull in")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_different(
        first_deadline_nanos: i64,
        second_deadline_nanos: i64,
    ) {
        let ctx = TestContext::new().await;

        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
            fidl::BootInstant::from_nanos(first_deadline_nanos),
            fta::SetMode::KeepAlive(fake_wake_lease()),
            "Hello1".into(),
        );
        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
            fidl::BootInstant::from_nanos(second_deadline_nanos),
            fta::SetMode::KeepAlive(fake_wake_lease()),
            "Hello2".into(),
        );

        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);

        let mut tasks = [(first_deadline_nanos, set_task_1), (second_deadline_nanos, set_task_2)];
        tasks.sort_by(|a, b| a.0.cmp(&b.0));
        let [mut first_task, mut second_task] = tasks;

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(first_task.0)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut first_task.1).await,
            Poll::Ready(Ok(Ok(_)))
        );
        assert_matches!(TestExecutor::poll_until_stalled(&mut second_task.1).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(second_task.0)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut second_task.1).await,
            Poll::Ready(Ok(Ok(_)))
        );
    }

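    // Notifier-based variant: the earlier alarm notifies first and the later one only
    // after its own deadline, regardless of the order in which they were scheduled.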
    #[test_case(100, 200 ; "push out")]
    #[test_case(200, 100 ; "pull in")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_two_alarms_different_notify(
        first_deadline_nanos: i64,
        second_deadline_nanos: i64,
    ) {
        const ALARM_ID_1: &str = "Hello1";
        const ALARM_ID_2: &str = "Hello2";

        let ctx = TestContext::new().await;

        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        alarm_id,
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        let mut notifier_all = futures::stream::select_all([
            schedule(first_deadline_nanos, ALARM_ID_1).await,
            schedule(second_deadline_nanos, ALARM_ID_2).await,
        ]);
        let [(early_ns, early_alarm), (later_ns, later_alarm)] = {
            let mut tasks =
                [(first_deadline_nanos, ALARM_ID_1), (second_deadline_nanos, ALARM_ID_2)];
            tasks.sort_by(|a, b| a.0.cmp(&b.0));
            tasks
        };

        let mut next_task = notifier_all.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(early_ns)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == early_alarm
        );

        let mut next_task = notifier_all.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(later_ns)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == later_alarm
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_all.next()).await,
            Poll::Ready(None)
        );
    }

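    // An alarm whose deadline is already in the past completes immediately, without
    // advancing fake time.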
    #[fuchsia::test(allow_stalls = false)]
    async fn test_alarm_immediate() {
        let ctx = TestContext::new().await;
        let mut set_task = ctx.wake_proxy.set_and_wait(
            fidl::BootInstant::INFINITE_PAST,
            fta::SetMode::KeepAlive(fake_wake_lease()),
            "Hello1".into(),
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task).await,
            Poll::Ready(Ok(Ok(_)))
        );
    }

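    // Notifier-based variant: an already-expired deadline produces an immediate
    // `Notify` for the alarm ID.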
    #[fuchsia::test(allow_stalls = false)]
    async fn test_alarm_immediate_notify() {
        const ALARM_ID: &str = "Hello";
        let ctx = TestContext::new().await;

        let (notifier_client, mut notifier_stream) =
            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();

        let mut set_task = ctx.wake_proxy.set(
            notifier_client,
            fidl::BootInstant::INFINITE_PAST,
            fta::SetMode::KeepAlive(fake_wake_lease()),
            ALARM_ID,
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task).await,
            Poll::Ready(Ok(Ok(_)))
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_stream.next()).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
    }

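    // Re-scheduling an alarm with the same ID replaces it: the first `SetAndWait`
    // completes with `Dropped`, the second completes at the override deadline, and
    // the inspect tree shows no pending timers afterwards.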
    #[test_case(200, 100 ; "pull in")]
    #[test_case(100, 200 ; "push out")]
    #[test_case(100, 100 ; "replace with the same deadline")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule(initial_deadline_nanos: i64, override_deadline_nanos: i64) {
        const ALARM_ID: &str = "Hello";

        let ctx = TestContext::new().await;

        let schedule = |deadline_nanos: i64| {
            let setup_done = zx::Event::create();
            let task = ctx.wake_proxy.set_and_wait(
                fidl::BootInstant::from_nanos(deadline_nanos),
                fta::SetMode::NotifySetupDone(
                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
                ),
                ALARM_ID.into(),
            );
            (task, setup_done)
        };

        let (mut set_task_1, setup_done_1) = schedule(initial_deadline_nanos);
        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);

        let (mut set_task_2, setup_done_2) = schedule(override_deadline_nanos);
        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_1).await,
            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
        );
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos - 1))
            .await;
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos))
            .await;
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_2).await,
            Poll::Ready(Ok(Ok(_)))
        );

        assert_data_tree!(finspect::component::inspector(), root: {
            test: {
                hardware: {
                    current_deadline: "(none)",
                    remaining_until_alarm: "(none)",
                },
                now_formatted: format!("{override_deadline_nanos}ns ({override_deadline_nanos})"),
                now_ns: override_deadline_nanos,
                pending_timers: "Boot:\n\t\n\tUTC:\n\t",
                pending_timers_count: 0u64,
                requested_deadlines_ns: AnyProperty,
                schedule_delay_ns: AnyProperty,
                slack_ns: AnyProperty,
                boot_deadlines_count: AnyProperty,
                utc_deadlines_count: AnyProperty,
                debug_node: contains {},
            },
        });
    }

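    // Notifier-based reschedule: the replaced alarm's notifier gets `NotifyError`
    // with `Dropped` and closes, while the replacement fires at the override
    // deadline.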
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule_notify() {
        const ALARM_ID: &str = "Hello";
        const INITIAL_DEADLINE_NANOS: i64 = 100;
        const OVERRIDE_DEADLINE_NANOS: i64 = 200;

        let ctx = TestContext::new().await;

        let schedule = async |deadline_nanos: i64| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        ALARM_ID.into(),
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

        let mut notifier_1 = schedule(INITIAL_DEADLINE_NANOS).await;
        let mut next_task_1 = notifier_1.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);

        let mut notifier_2 = schedule(OVERRIDE_DEADLINE_NANOS).await;
        let mut next_task_2 = notifier_2.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);

        assert_matches!(
            TestExecutor::poll_until_stalled(&mut next_task_1).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_1.next()).await,
            Poll::Ready(None)
        );

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(OVERRIDE_DEADLINE_NANOS))
            .await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task_2).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(None)
        );
    }

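    // With the server end of the connection already closed, every `SetAndWait`
    // attempt returns `ClientChannelClosed`.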
    #[fuchsia::test(allow_stalls = false)]
    async fn test_fidl_error_on_reschedule() {
        const DEADLINE_NANOS: i64 = 100;

        let (wake_proxy, _stream) =
            fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
        drop(_stream);

        assert_matches!(
            wake_proxy
                .set_and_wait(
                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
                    fta::SetMode::KeepAlive(fake_wake_lease()),
                    "hello1".into(),
                )
                .await,
            Err(fidl::Error::ClientChannelClosed { .. })
        );

        assert_matches!(
            wake_proxy
                .set_and_wait(
                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
                    fta::SetMode::KeepAlive(fake_wake_lease()),
                    "hello2".into(),
                )
                .await,
            Err(fidl::Error::ClientChannelClosed { .. })
        );
    }

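    // A UTC-referenced alarm (`SetAndWaitUtc`): the call stays pending until the UTC
    // clock is moved past the requested timestamp, and resolves once boot time
    // advances afterwards.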
    #[fuchsia::test(allow_stalls = false)]
    async fn test_set_and_wait_utc() {
        const ALARM_ID: &str = "Hello_set_and_wait_utc";
        let ctx = TestContext::new().await;

        let now_boot = fasync::BootInstant::now();
        ctx.utc_clock
            .update(
                zx::ClockUpdate::builder()
                    .absolute_value(now_boot.into(), ctx.utc_backstop)
                    .build(),
            )
            .unwrap();

        let timestamp_utc = ctx.utc_backstop + fxr::UtcDuration::from_nanos(2);
        let mut wake_fut = ctx.wake_proxy.set_and_wait_utc(
            &fta::InstantUtc { timestamp_utc: timestamp_utc.into_nanos() },
            fta::SetMode::KeepAlive(fake_wake_lease()),
            ALARM_ID,
        );

        assert_matches!(TestExecutor::poll_until_stalled(&mut wake_fut).await, Poll::Pending);

        ctx.utc_clock
            .update(
                zx::ClockUpdate::builder()
                    .absolute_value(
                        now_boot.into(),
                        ctx.utc_backstop + fxr::UtcDuration::from_nanos(100),
                    )
                    .build(),
            )
            .unwrap();

        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
        assert_matches!(TestExecutor::poll_until_stalled(wake_fut).await, Poll::Ready(_));
    }
}