alarms/
lib.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Alarm management subsystem.
//!
//! This subsystem serves the FIDL API `fuchsia.time.alarms/WakeAlarms`. To
//! instantiate it, you can use the following approach:
//!
//! ```ignore
//! let proxy = client::connect_to_protocol::<ffhh::DeviceMarker>()
//!     .map_err(|e| error!("error: {}", e))
//!     .expect("add proper error handling");
//! // `scope` is an async scope handle and `inspect_node` an Inspect node.
//! let timer_loop = Rc::new(alarms::Loop::new(scope, proxy, inspect_node));
//! ```
//!
//! From here, use the standard approach with [ServiceFs::new] to expose the
//! discoverable FIDL endpoint and call:
//!
//! ```ignore
//! let stream: fidl_fuchsia_time_alarms::WakeAlarmsRequestStream = ... ;
//! alarms::serve(timer_loop, stream).await;
//! // ...
//! ```
//!
//! Of course, for everything to work well, your component will need the
//! appropriate capability routing.  Refer to the capability routing docs for
//! those details.
27
28mod emu;
29
30use crate::emu::EmulationTimerOps;
31use anyhow::{anyhow, Context, Result};
32use async_trait::async_trait;
33use fidl::encoding::ProxyChannelBox;
34use fidl::endpoints::RequestStream;
35use fidl::HandleBased;
36use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
37use fuchsia_inspect::{HistogramProperty, Property};
38use futures::channel::mpsc;
39use futures::sink::SinkExt;
40use futures::{StreamExt, TryStreamExt};
41use log::{debug, error, warn};
42use scopeguard::defer;
43use std::cell::RefCell;
44use std::cmp;
45use std::collections::{BTreeMap, BinaryHeap, HashMap};
46use std::rc::Rc;
47use std::sync::LazyLock;
48use time_pretty::{format_duration, format_timer, MSEC_IN_NANOS};
49use zx::AsHandleRef;
50use {
51    fidl_fuchsia_hardware_hrtimer as ffhh, fidl_fuchsia_time_alarms as fta,
52    fuchsia_async as fasync, fuchsia_inspect as finspect, fuchsia_trace as trace,
53};
54
55static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
56static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));
57
58/// The largest value of timer "ticks" that is still considered useful.
59static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);
60
61/// The hrtimer ID used for scheduling wake alarms.  This ID is reused from
62/// Starnix, and should eventually no longer be critical.
63const MAIN_TIMER_ID: usize = 6;
64
65/// This is what we consider a "long" delay in alarm operations.
66const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;
67
/// A macro that waits on a future and, if the future takes longer than
/// 30 seconds to complete, logs a warning message (repeating every 30 seconds
/// until the future completes).
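///
/// A usage sketch, mirroring how the macro is used in this file:
///
/// ```ignore
/// let properties = log_long_op!(proxy.get_properties());
/// ```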
70macro_rules! log_long_op {
71    ($fut:expr) => {{
72        use futures::FutureExt;
73        let fut = $fut;
74        futures::pin_mut!(fut);
75        loop {
76            let timeout = fasync::Timer::new(std::time::Duration::from_secs(30));
77            futures::select! {
78                res = fut.as_mut().fuse() => {
79                    break res;
80                }
81                _ = timeout.fuse() => {
82                    warn!("unexpected blocking: long-running async operation at {}:{}", file!(), line!());
83                    #[cfg(all(target_os = "fuchsia", not(doc)))]
84                    ::debug::backtrace_request_all_threads();
85                }
86            }
87        }
88    }};
89}
90
/// Compares two optional deadlines and returns true if `before` is different from `after`.
/// `None`s compare as equal.
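/// For example, `(None, None)` and two equal `Some` deadlines compare as
/// unchanged, while `(None, Some(_))` compares as changed.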
93fn is_deadline_changed(
94    before: Option<fasync::BootInstant>,
95    after: Option<fasync::BootInstant>,
96) -> bool {
97    match (before, after) {
98        (None, None) => false,
99        (None, Some(_)) | (Some(_), None) => true,
100        (Some(before), Some(after)) => before != after,
101    }
102}
103
/// Errors returnable from [TimerOps] calls.
105#[derive(Debug)]
106pub(crate) enum TimerOpsError {
107    /// The driver reported an error.
108    Driver(ffhh::DriverError),
109    /// FIDL-specific RPC error.
110    Fidl(fidl::Error),
111}
112
113trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {
114    // nop
115}
116
117/// Abstracts away timer operations.
118#[async_trait(?Send)]
119pub(crate) trait TimerOps {
120    /// Stop the timer with the specified ID.
121    async fn stop(&self, id: u64);
122
123    /// Examine the timer's properties, such as supported resolutions and tick
124    /// counts.
125    async fn get_timer_properties(&self) -> TimerConfig;
126
    /// This method must return a concrete future rather than being declared
    /// `async`, to satisfy the borrow checker: an `async fn` here would keep
    /// `self` borrowed, which would thwart attempts to move the returned
    /// future into a separate closure.
131    fn start_and_wait(
132        &self,
133        id: u64,
134        resolution: &ffhh::Resolution,
135        ticks: u64,
136        setup_event: zx::Event,
137    ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
138}
139
140/// TimerOps backed by an actual hardware timer.
141struct HardwareTimerOps {
142    proxy: ffhh::DeviceProxy,
143}
144
145impl HardwareTimerOps {
146    fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
147        Box::new(Self { proxy })
148    }
149}
150
151#[async_trait(?Send)]
152impl TimerOps for HardwareTimerOps {
153    async fn stop(&self, id: u64) {
154        let _ = self
155            .proxy
156            .stop(id)
157            .await
158            .map(|result| {
159                let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
160            })
161            .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
162    }
163
164    async fn get_timer_properties(&self) -> TimerConfig {
165        match log_long_op!(self.proxy.get_properties()) {
166            Ok(p) => {
167                let timers_properties = &p.timers_properties.expect("timers_properties must exist");
168                debug!("get_timer_properties: got: {:?}", timers_properties);
169
170                // Pick the correct hrtimer to use for wakes.
171                let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
172                    // Mostly vim3, where we have pre-existing timer allocations
173                    // that we don't need to change.
174                    MAIN_TIMER_ID
                } else if !timers_properties.is_empty() {
176                    // Newer devices that don't need to allocate timer IDs, and/or
177                    // may not even have as many timers as vim3 does. But, at least
178                    // one timer is needed.
179                    0
180                } else {
181                    // Give up.
182                    return TimerConfig::new_empty();
183                };
184                let main_timer_properties = &timers_properties[timer_index];
185                debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
186                // Not sure whether it is useful to have more ticks than this, so limit it.
187                let max_ticks: u64 = std::cmp::min(
188                    main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
189                    *MAX_USEFUL_TICKS,
190                );
191                let resolutions = &main_timer_properties
192                    .supported_resolutions
193                    .as_ref()
194                    .expect("supported_resolutions is populated")
195                    .iter()
196                    .last() //  Limits the resolution to the coarsest available.
197                    .map(|r| match *r {
198                        ffhh::Resolution::Duration(d) => d,
199                        _ => {
200                            error!(
201                            "get_timer_properties: Unknown resolution type, returning millisecond."
202                        );
203                            MSEC_IN_NANOS
204                        }
205                    })
206                    .map(|d| zx::BootDuration::from_nanos(d))
207                    .into_iter() // Used with .last() above.
208                    .collect::<Vec<_>>();
209                let timer_id = main_timer_properties.id.expect("timer ID is always present");
210                TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
211            }
212            Err(e) => {
213                error!("could not get timer properties: {:?}", e);
214                TimerConfig::new_empty()
215            }
216        }
217    }
218
219    fn start_and_wait(
220        &self,
221        id: u64,
222        resolution: &ffhh::Resolution,
223        ticks: u64,
224        setup_event: zx::Event,
225    ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
226        let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
227        Box::pin(HwResponseFut { pinner: Box::pin(inner) })
228    }
229}
230
231// Untangles the borrow checker issues that otherwise result from making
232// TimerOps::start_and_wait an async function.
233struct HwResponseFut {
234    pinner: std::pin::Pin<
235        Box<
236            fidl::client::QueryResponseFut<
237                ffhh::DeviceStartAndWaitResult,
238                fidl::encoding::DefaultFuchsiaResourceDialect,
239            >,
240        >,
241    >,
242}
243
244use std::task::Poll;
245impl SawResponseFut for HwResponseFut {}
246impl std::future::Future for HwResponseFut {
247    type Output = Result<zx::EventPair, TimerOpsError>;
248    fn poll(
249        mut self: std::pin::Pin<&mut Self>,
250        cx: &mut std::task::Context<'_>,
251    ) -> std::task::Poll<Self::Output> {
252        let inner_poll = self.pinner.as_mut().poll(cx);
253        match inner_poll {
254            Poll::Ready(result) => Poll::Ready(match result {
255                Ok(Ok(keep_alive)) => Ok(keep_alive),
256                Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
257                Err(e) => Err(TimerOpsError::Fidl(e)),
258            }),
259            Poll::Pending => Poll::Pending,
260        }
261    }
262}
263
264/// Stops a currently running hardware timer.
265async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
266    trace::duration!(c"alarms", c"hrtimer:stop", "id" => timer_config.id);
267    debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
268    hrtimer.stop(timer_config.id).await;
269    debug!("stop_hrtimer: stopped  hardware timer: {}", timer_config.id);
270}
271
272// The default size of the channels created in this module.
273const CHANNEL_SIZE: usize = 100;
274
275trait AlarmResponder: std::fmt::Debug {
276    fn send(
277        &self,
278        alarm_id: &str,
279        result: Result<fidl::EventPair, fta::WakeAlarmsError>,
280    ) -> Option<Result<(), fidl::Error>>;
281}
282
283impl AlarmResponder for RefCell<Option<fta::WakeAlarmsSetAndWaitResponder>> {
284    fn send(
285        &self,
286        _alarm_id: &str,
287        result: Result<fidl::EventPair, fta::WakeAlarmsError>,
288    ) -> Option<Result<(), fidl::Error>> {
289        self.borrow_mut().take().map(|responder| responder.send(result))
290    }
291}
292
293impl AlarmResponder for RefCell<Option<fidl::endpoints::ClientEnd<fta::NotifierMarker>>> {
294    fn send(
295        &self,
296        alarm_id: &str,
297        result: Result<fidl::EventPair, fta::WakeAlarmsError>,
298    ) -> Option<Result<(), fidl::Error>> {
299        self.borrow_mut().take().map(|notifier| match result {
300            Ok(keep_alive) => notifier.into_proxy().notify(alarm_id, keep_alive),
301            Err(e) => notifier.into_proxy().notify_error(alarm_id, e),
302        })
303    }
304}
305
306/// A type handed around between the concurrent loops run by this module.
307#[derive(Debug)]
308enum Cmd {
309    /// Request a timer to be started.
310    Start {
311        /// The unique connection ID.
312        cid: zx::Koid,
        /// A timestamp (presumably in the future) at which to expire the timer.
        deadline: fasync::BootInstant,
        /// The API supports several modes. See fuchsia.time.alarms/Wake.fidl.
316        mode: fta::SetMode,
317        /// An alarm identifier, chosen by the caller.
318        alarm_id: String,
319        /// A responder that will be called when the timer expires. The
320        /// client end of the connection will block until we send something
321        /// on this responder.
322        ///
        /// This is packaged into an Rc only because both the "happy path"
        /// and the error path must be able to consume the responder.  Wrapping it
        /// this way lets either path consume it without the responder needing to
        /// implement Default.
326        responder: Rc<dyn AlarmResponder>,
327    },
328    StopById {
329        done: zx::Event,
330        timer_id: TimerId,
331    },
332    Alarm {
333        expired_deadline: fasync::BootInstant,
334        keep_alive: fidl::EventPair,
335    },
336    AlarmFidlError {
337        expired_deadline: fasync::BootInstant,
338        error: fidl::Error,
339    },
340    AlarmDriverError {
341        expired_deadline: fasync::BootInstant,
342        error: ffhh::DriverError,
343    },
344}
345
346impl std::fmt::Display for Cmd {
347    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
348        match self {
349            Cmd::Start { cid, deadline, alarm_id, .. } => {
350                write!(
351                    f,
352                    "Start[alarm_id=\"{}\", cid={:?}, deadline={}]",
353                    alarm_id,
354                    cid,
355                    format_timer((*deadline).into())
356                )
357            }
358            Cmd::Alarm { expired_deadline, .. } => {
359                write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
360            }
361            Cmd::AlarmFidlError { expired_deadline, error } => {
362                write!(
363                    f,
364                    "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
365                    format_timer((*expired_deadline).into()),
366                    error
367                )
368            }
369            Cmd::AlarmDriverError { expired_deadline, error } => {
370                write!(
371                    f,
372                    "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
373                    format_timer((*expired_deadline).into()),
374                    error
375                )
376            }
377            Cmd::StopById { timer_id, done: _ } => {
378                write!(f, "StopById[timerId={}]", timer_id,)
379            }
380        }
381    }
382}
383
384/// Extracts a KOID from the underlying channel of the provided [stream].
385///
386/// # Returns
387/// - zx::Koid: the KOID you wanted.
388/// - fta::WakeAlarmsRequestStream: the stream; we had to deconstruct it briefly,
389///   so this gives it back to you.
390pub fn get_stream_koid(
391    stream: fta::WakeAlarmsRequestStream,
392) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
393    let (inner, is_terminated) = stream.into_inner();
394    let koid = inner.channel().as_channel().get_koid().expect("infallible");
395    let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
396    (koid, stream)
397}
398
399/// Serves a single Wake API client.
400pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
401    let timer_loop = timer_loop.clone();
402    let timer_loop_send = || timer_loop.get_sender();
403    let (cid, mut requests) = get_stream_koid(requests);
404    let mut request_count = 0;
405    debug!("alarms::serve: opened connection: {:?}", cid);
406    while let Some(maybe_request) = requests.next().await {
407        request_count += 1;
408        debug!("alarms::serve: cid: {:?} incoming request: {}", cid, request_count);
409        match maybe_request {
410            Ok(request) => {
411                // Should return quickly.
412                handle_request(cid, timer_loop_send(), request).await;
413            }
414            Err(e) => {
415                warn!("alarms::serve: error in request: {:?}", e);
416            }
417        }
418        debug!("alarms::serve: cid: {:?} done request: {}", cid, request_count);
419    }
420    // Check if connection closure was intentional. It is way too easy to close
421    // a FIDL connection inadvertently if doing non-mainstream things with FIDL.
422    warn!("alarms::serve: CLOSED CONNECTION: cid: {:?}", cid);
423}
424
425async fn handle_cancel(alarm_id: String, cid: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
426    let done = zx::Event::create();
427    let timer_id = TimerId { alarm_id: alarm_id.clone(), cid };
428    if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
429        warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
430    }
431    wait_signaled(&done).await;
432}
433
434/// Processes a single Wake API request from a single client.
435/// This function is expected to return quickly.
436///
437/// # Args
438/// - `cid`: the unique identifier of the connection producing these requests.
439/// - `cmd`: the outbound queue of commands to deliver to the timer manager.
440/// - `request`: a single inbound Wake FIDL API request.
441async fn handle_request(
442    cid: zx::Koid,
443    mut cmd: mpsc::Sender<Cmd>,
444    request: fta::WakeAlarmsRequest,
445) {
446    match request {
447        fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
            // Since the responder is consumed by either the happy path or the error path,
            // but never both, and because the responder does not implement Default, this is
            // a way to send it in two mutually exclusive directions.  Each direction will
            // reverse this wrapping once the responder makes it to the other side.
452            //
453            // Rc required because of sharing a noncopyable struct; RefCell required because
454            // borrow_mut() is needed to move out; and Option is required so we can
455            // use take() to replace the struct with None so it does not need to leave
456            // a Default in its place.
457            let responder = Rc::new(RefCell::new(Some(responder)));
458
459            // Alarm is not scheduled yet!
460            debug!(
461                "handle_request: scheduling alarm_id: \"{}\"\n\tcid: {:?}\n\tdeadline: {}",
462                alarm_id,
463                cid,
464                format_timer(deadline.into())
465            );
466            // Expected to return quickly.
467            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
468                cid,
469                deadline: deadline.into(),
470                mode,
471                alarm_id: alarm_id.clone(),
472                responder: responder.clone(),
473            })) {
474                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
475                responder
476                    .borrow_mut()
477                    .take()
478                    .expect("always present if call fails")
479                    .send(Err(fta::WakeAlarmsError::Internal))
480                    .unwrap();
481            }
482        }
483        fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
484            // TODO: b/383062441 - make this into an async task so that we wait
485            // less to schedule the next alarm.
486            log_long_op!(handle_cancel(alarm_id, cid, &mut cmd));
487        }
488        fta::WakeAlarmsRequest::Set { notifier, deadline, mode, alarm_id, responder } => {
489            // Alarm is not scheduled yet!
490            debug!(
491                "handle_request: scheduling alarm_id: \"{alarm_id}\"\n\tcid: {cid:?}\n\tdeadline: {}",
492                format_timer(deadline.into())
493            );
494            // Expected to return quickly.
495            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
496                cid,
497                deadline: deadline.into(),
498                mode,
499                alarm_id: alarm_id.clone(),
500                responder: Rc::new(RefCell::new(Some(notifier))),
501            })) {
502                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
503                responder.send(Err(fta::WakeAlarmsError::Internal)).unwrap();
504            } else {
505                // Successfully scheduled the alarm.
506                responder.send(Ok(())).unwrap();
507            }
508        }
509        fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
510    };
511}
512
513/// Represents a single alarm event processing loop.
514///
/// One instance is created per alarm-capable low-level device.
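///
/// A construction and serving sketch (`scope`, `device_proxy`, `inspect_node`,
/// and `request_stream` are placeholders):
///
/// ```ignore
/// let timer_loop = Rc::new(Loop::new(scope, device_proxy, inspect_node));
/// alarms::serve(timer_loop, request_stream).await;
/// ```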
516pub struct Loop {
    // Given to any clients that need to send messages to the timer loop task
    // via [get_sender].
519    snd: mpsc::Sender<Cmd>,
520}
521
522impl Loop {
523    /// Creates a new instance of [Loop].
524    ///
525    /// `device_proxy` is a connection to a low-level timer device.
526    pub fn new(
527        scope: fasync::ScopeHandle,
528        device_proxy: ffhh::DeviceProxy,
529        inspect: finspect::Node,
530    ) -> Self {
531        let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
532        Loop::new_internal(scope, hw_device_timer_ops, inspect)
533    }
534
    /// Creates a new instance of [Loop] with emulated wake alarms.
536    pub fn new_emulated(scope: fasync::ScopeHandle, inspect: finspect::Node) -> Self {
537        let timer_ops = Box::new(EmulationTimerOps::new());
538        Loop::new_internal(scope, timer_ops, inspect)
539    }
540
541    fn new_internal(
542        scope: fasync::ScopeHandle,
543        timer_ops: Box<dyn TimerOps>,
544        inspect: finspect::Node,
545    ) -> Self {
546        let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
547        let snd_clone = snd.clone();
548        let loop_scope = scope.clone();
549        scope.spawn_local(wake_timer_loop(loop_scope, snd_clone, rcv, timer_ops, inspect));
550        Self { snd }
551    }
552
553    /// Gets a copy of a channel through which async commands may be sent to
554    /// the [Loop].
555    fn get_sender(&self) -> mpsc::Sender<Cmd> {
556        self.snd.clone()
557    }
558}
559
560/// A representation of the state of a single Timer.
561#[derive(Debug)]
562struct TimerNode {
563    /// The deadline at which the timer expires.
564    deadline: fasync::BootInstant,
565    /// The unique alarm ID associated with this timer.
566    alarm_id: String,
567    /// The unique connection ID that this timer belongs to.  Multiple timers
568    /// may share the same `cid`.
569    cid: zx::Koid,
570    /// The responder that is blocked until the timer expires.  Used to notify
571    /// the alarms subsystem client when this alarm expires.
572    responder: Rc<dyn AlarmResponder>,
573}
574
575impl TimerNode {
576    fn new(
577        deadline: fasync::BootInstant,
578        alarm_id: String,
579        cid: zx::Koid,
580        responder: Rc<dyn AlarmResponder>,
581    ) -> Self {
582        Self { deadline, alarm_id, cid, responder }
583    }
584
585    fn get_alarm_id(&self) -> &str {
586        &self.alarm_id[..]
587    }
588
589    fn get_cid(&self) -> &zx::Koid {
590        &self.cid
591    }
592
593    fn get_id(&self) -> TimerId {
594        TimerId { alarm_id: self.alarm_id.clone(), cid: self.cid.clone() }
595    }
596
597    fn get_deadline(&self) -> &fasync::BootInstant {
598        &self.deadline
599    }
600}
601
602impl Drop for TimerNode {
603    // If the TimerNode was evicted without having expired, notify the other
604    // end that the timer has been canceled.
605    fn drop(&mut self) {
606        // If the TimerNode is dropped, notify the client that may have
607        // been waiting. We can not drop a responder, because that kills
608        // the FIDL connection.
609        if let Some(Err(e)) =
610            self.responder.send(&self.alarm_id, Err(fta::WakeAlarmsError::Dropped))
611        {
612            error!("could not drop responder: {:?}", e);
613        }
614    }
615}
616
/// This and the other comparison trait implementations are needed to establish
/// a total ordering of TimerNodes.
619impl std::cmp::Eq for TimerNode {}
620
621impl std::cmp::PartialEq for TimerNode {
622    fn eq(&self, other: &Self) -> bool {
623        self.deadline == other.deadline && self.alarm_id == other.alarm_id && self.cid == other.cid
624    }
625}
626
627impl std::cmp::PartialOrd for TimerNode {
    /// Order by deadline first; timers with the same deadline are ordered
    /// by their respective IDs to avoid ordering nondeterminism.
630    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
631        Some(self.cmp(other))
632    }
633}
634
635impl Ord for TimerNode {
636    /// Compares two [TimerNode]s, by "which is sooner".
637    ///
638    /// Ties are broken by alarm ID, then by connection ID.
639    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
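        // Note: `BinaryHeap` is a max-heap, so the deadline comparison below is
        // reversed to make the heap yield the earliest deadline first.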
640        let ordering = other.deadline.cmp(&self.deadline);
641        if ordering == std::cmp::Ordering::Equal {
            let ordering = self.alarm_id.cmp(&other.alarm_id);
643            if ordering == std::cmp::Ordering::Equal {
644                self.cid.cmp(&other.cid)
645            } else {
646                ordering
647            }
648        } else {
649            ordering
650        }
651    }
652}
653
654/// A full timer identifier.
655#[derive(Debug, PartialEq, Eq, Hash)]
656struct TimerId {
657    /// Connection-unique alarm ID.
658    alarm_id: String,
659    /// Connection identifier, unique per each client connection.
660    cid: zx::Koid,
661}
662
663// Compute a trace ID for a given alarm ID. This identifier is used across
664// processes for tracking the alarm's lifetime.
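// For example, an alarm ID of "starnix:Koid(1234)" maps to trace ID 1234.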
665fn as_trace_id(alarm_id: &str) -> trace::Id {
666    if let Some(rest) = alarm_id.strip_prefix("starnix:Koid(") {
667        if let Some((koid_str, _)) = rest.split_once(')') {
668            if let Ok(trace_id) = koid_str.parse::<u64>() {
669                return trace_id.into();
670            }
671        }
672    }
673
674    // For now, other components don't have a specific way to get the trace id.
675    0.into()
676}
677
678impl std::fmt::Display for TimerId {
679    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
680        write!(f, "TimerId[alarm_id:{},cid:{:?}]", self.alarm_id, self.cid)
681    }
682}
683
684/// Contains all the timers known by the alarms subsystem.
685///
686/// [Timers] can efficiently find a timer with the earliest deadline,
687/// and given a cutoff can expire one timer for which the deadline has
688/// passed.
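///
/// A typical usage sketch (`deadline`, `alarm_id`, `cid`, and `responder` are
/// placeholders):
///
/// ```ignore
/// let mut timers = Timers::new();
/// timers.push(TimerNode::new(deadline, alarm_id, cid, responder));
/// let earliest = timers.peek_deadline();
/// ```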
689struct Timers {
690    timers: BinaryHeap<TimerNode>,
691    deadline_by_id: HashMap<TimerId, fasync::BootInstant>,
692}
693
694impl std::fmt::Display for Timers {
695    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
696        let now = fasync::BootInstant::now();
697        let sorted = self
698            .timers
699            .iter()
700            .map(|n| (n.deadline, n.alarm_id.clone()))
701            .collect::<BTreeMap<_, _>>()
702            .into_iter()
703            .map(|(k, v)| {
704                let remaining = k - now;
705                format!(
706                    "Timeout: {} => timer_id: {}, remaining: {}",
707                    format_timer(k.into()),
708                    v,
709                    format_duration(remaining.into())
710                )
711            })
712            .collect::<Vec<_>>();
713        let joined = sorted.join("\n\t");
714        write!(f, "\n\t{}", joined)
715    }
716}
717
718impl Timers {
    /// Creates an empty [Timers].
720    fn new() -> Self {
721        Self { timers: BinaryHeap::new(), deadline_by_id: HashMap::new() }
722    }
723
724    /// Adds a [TimerNode] to [Timers].
725    ///
726    /// If the inserted node is identical to an already existing node, then
727    /// nothing is changed.  If the deadline is different, then the timer node
728    /// is replaced.
729    fn push(&mut self, n: TimerNode) {
730        let new_id = n.get_id();
731        if let Some(deadline) = self.deadline_by_id.get(&new_id) {
732            // There already is a deadline for this timer.
733            if n.deadline == *deadline {
734                return;
735            }
736            // Else replace. The deadline may be pushed out or pulled in.
737            self.deadline_by_id.insert(new_id, n.deadline.clone());
738            self.timers.retain(|t| t.get_id() != n.get_id());
739            self.timers.push(n);
740        } else {
741            // New timer node.
742            self.deadline_by_id.insert(new_id, n.deadline);
743            self.timers.push(n);
744        }
745    }
746
747    /// Returns a reference to the stored timer with the earliest deadline.
748    fn peek(&self) -> Option<&TimerNode> {
749        self.timers.peek()
750    }
751
752    /// Returns the deadline of the proximate timer in [Timers].
753    fn peek_deadline(&self) -> Option<fasync::BootInstant> {
754        self.peek().map(|t| t.deadline)
755    }
756
757    fn peek_id(&self) -> Option<TimerId> {
758        self.peek().map(|t| TimerId { alarm_id: t.alarm_id.clone(), cid: t.cid })
759    }
760
761    /// Args:
762    /// - `now` is the current time.
763    /// - `deadline` is the timer deadline to check for expiry.
764    fn expired(now: fasync::BootInstant, deadline: fasync::BootInstant) -> bool {
765        deadline <= now
766    }
767
768    /// Returns true if there are no known timers.
769    fn is_empty(&self) -> bool {
770        let empty1 = self.timers.is_empty();
771        let empty2 = self.deadline_by_id.is_empty();
772        assert!(empty1 == empty2, "broken invariant: empty1: {} empty2:{}", empty1, empty2);
773        empty1
774    }
775
776    /// Attempts to expire the earliest timer.
777    ///
    /// If a timer is expired, it is removed from [Timers] and returned to the caller. Note that
    /// there may be more timers that need expiring at the provided reference instant `now`. To
    /// drain [Timers] of all expired timers, one must repeat the call to this method with the
    /// same value of `now` until it returns `None`.
782    ///
783    /// Args:
784    /// - `now`: the time instant to compare the stored timers against.  Timers for
785    ///   which the deadline has been reached or surpassed are eligible for expiry.
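    ///
    /// A drain sketch:
    ///
    /// ```ignore
    /// while let Some(expired) = timers.maybe_expire_earliest(now) {
    ///     // ... notify the responder of `expired` ...
    /// }
    /// ```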
786    fn maybe_expire_earliest(&mut self, now: fasync::BootInstant) -> Option<TimerNode> {
787        self.peek_deadline()
788            .map(|d| {
789                if Timers::expired(now, d) {
790                    self.timers.pop().map(|e| {
791                        self.deadline_by_id.remove(&e.get_id());
792                        e
793                    })
794                } else {
795                    None
796                }
797            })
798            .flatten()
799    }
800
801    /// Removes an alarm by ID.  If the earliest alarm is the alarm to be removed,
802    /// it is returned.
803    fn remove_by_id(&mut self, timer_id: &TimerId) -> Option<TimerNode> {
804        let ret = if let Some(t) = self.peek_id() {
805            if t == *timer_id {
806                self.timers.pop()
807            } else {
808                None
809            }
810        } else {
811            None
812        };
813
814        self.timers.retain(|t| t.alarm_id != timer_id.alarm_id || t.cid != timer_id.cid);
815        self.deadline_by_id.remove(timer_id);
816        ret
817    }
818
819    /// Returns the number of currently pending timers.
820    fn timer_count(&self) -> usize {
821        let count1 = self.timers.len();
822        let count2 = self.deadline_by_id.len();
823        assert!(count1 == count2, "broken invariant: count1: {}, count2: {}", count1, count2);
824        count1
825    }
826}
827
828// Clones a handle. Needed for 1:N notifications.
829pub(crate) fn clone_handle<H: HandleBased>(handle: &H) -> H {
830    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
831}
832
833async fn wait_signaled<H: HandleBased>(handle: &H) {
834    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
835}
836
837pub(crate) fn signal<H: HandleBased>(handle: &H) {
838    handle.signal_handle(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
839}
840
841/// A [TimerDuration] represents a duration of time that can be expressed by
842/// a discrete timer register.
843///
844/// This is a low-level representation of time duration, used in interaction with
845/// hardware devices. It is therefore necessarily discretized, with adaptive
846/// resolution, depending on the physical characteristics of the underlying
847/// hardware timer that it models.
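///
/// For example, `TimerDuration::new(zx::BootDuration::from_millis(1), 10)`
/// represents 10 ms, expressed as 10 ticks of 1 ms each.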
848#[derive(Debug, Clone, Copy)]
849struct TimerDuration {
850    // The resolution of each one of the `ticks` below.
851    resolution: zx::BootDuration,
852    // The number of ticks that encodes time duration. Each "tick" represents
853    // one unit of `resolution` above.
854    ticks: u64,
855}
856
857/// This and the comparison traits below are used to allow TimerDuration
858/// calculations in a compact form.
859impl Eq for TimerDuration {}
860
861impl std::cmp::PartialOrd for TimerDuration {
862    fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
863        Some(self.cmp(other))
864    }
865}
866
867impl std::cmp::PartialEq for TimerDuration {
868    fn eq(&self, other: &Self) -> bool {
869        self.cmp(other) == std::cmp::Ordering::Equal
870    }
871}
872
873impl std::cmp::Ord for TimerDuration {
874    /// Two [TimerDuration]s compare equal if they model exactly the same duration of time,
875    /// no matter the resolutions.
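    ///
    /// For example, 1_000_000 ticks at a 1 ns resolution compare equal to
    /// 1 tick at a 1 ms resolution.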
876    fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
877        let self_ticks_128: i128 = self.ticks as i128;
878        let self_resolution: i128 = self.resolution_as_nanos() as i128;
879        let self_nanos = self_resolution * self_ticks_128;
880
881        let other_ticks_128: i128 = other.ticks as i128;
882        let other_resolution: i128 = other.resolution_as_nanos() as i128;
883        let other_nanos = other_resolution * other_ticks_128;
884
885        self_nanos.cmp(&other_nanos)
886    }
887}
888
889impl std::fmt::Display for TimerDuration {
    /// The human-readable form of a TimerDuration exposes both the tick count and
    /// the resolution, in the format "<ticks>x<resolution>", for example `10x5ms`.
893    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
894        let ticks = self.ticks;
895        let resolution = self.resolution();
896        // Example: 10x1ms
897        write!(f, "{}x{}", ticks, format_duration(resolution),)
898    }
899}
900
901impl TimerDuration {
902    /// The maximum representable TimerDuration that we allow.
903    fn max() -> Self {
904        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
905    }
906
907    /// The zero [TimerDuration].
908    fn zero() -> Self {
909        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
910    }
911
912    /// Creates a new timer duration with the given parameters.
913    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
914        Self { resolution, ticks }
915    }
916
917    /// Creates a new timer duration using the resolution from `res_source` and
918    /// a specified number of ticks.
919    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
920        Self::new(res_source.resolution, ticks)
921    }
922
923    /// Returns the time duration represented by this TimerDuration.
924    ///
925    /// Due to the way duration is expressed, the same time duration
926    /// can be represented in multiple ways.
927    fn duration(&self) -> zx::BootDuration {
928        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
929        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
930        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
931    }
932
933    /// The resolution of this TimerDuration
934    fn resolution(&self) -> zx::BootDuration {
935        self.resolution
936    }
937
938    fn resolution_as_nanos(&self) -> u64 {
939        self.resolution().into_nanos().try_into().expect("resolution is never negative")
940    }
941
942    /// The number of ticks of this [TimerDuration].
943    fn ticks(&self) -> u64 {
944        self.ticks
945    }
946}
947
948impl From<zx::BootDuration> for TimerDuration {
949    fn from(d: zx::BootDuration) -> TimerDuration {
950        let nanos = d.into_nanos();
951        assert!(nanos >= 0);
952        let nanos_u64 = nanos.try_into().expect("guarded by assert");
953        TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
954    }
955}
956
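/// Integer division of the total durations; for example, a 10 ms
/// [TimerDuration] divided by a 3 ms one yields 3.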
957impl std::ops::Div for TimerDuration {
958    type Output = u64;
959    fn div(self, rhs: Self) -> Self::Output {
960        let self_nanos = self.resolution_as_nanos() * self.ticks;
961        let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
962        self_nanos / rhs_nanos
963    }
964}
965
966impl std::ops::Mul<u64> for TimerDuration {
967    type Output = Self;
968    fn mul(self, rhs: u64) -> Self::Output {
969        Self::new(self.resolution, self.ticks * rhs)
970    }
971}
972
973/// Contains the configuration of a specific timer.
974#[derive(Debug)]
975pub(crate) struct TimerConfig {
976    /// The resolutions supported by this timer. Each entry is one possible
    /// duration for one timer "tick".  The resolution is picked when a timer
978    /// request is sent.
979    resolutions: Vec<zx::BootDuration>,
980    /// The maximum count of "ticks" that the timer supports. The timer usually
981    /// has a register that counts up or down based on a clock signal with
982    /// the period specified by `resolutions`.  This is the maximum value that
983    /// the counter can count to without overflowing.
984    max_ticks: u64,
985    /// The stable ID of the timer with the above configuration.
986    id: u64,
987}
988
989impl TimerConfig {
990    /// Creates a new timer config with supported timer resolutions and the max
991    /// ticks value for the timer's counter.
992    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
993        debug!(
994            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
995            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
996            max_ticks,
997            timer_id
998        );
999        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
1000        TimerConfig { resolutions, max_ticks, id: timer_id }
1001    }
1002
1003    fn new_empty() -> Self {
1004        error!("TimerConfig::new_empty() called, this is not OK.");
1005        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
1006    }
1007
    // Picks the most appropriate timer setting so that the timer fires as close
    // as possible to when `duration` expires.
1010    //
1011    // If duration is too far in the future for what the timer supports,
1012    // return a smaller value, to allow the timer to be reprogrammed multiple
1013    // times.
1014    //
1015    // If the available menu of resolutions is such that we can wake only after
1016    // the intended deadline, begrudgingly return that option.
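    //
    // For example, if the only supported resolution is 1ms and max_ticks is at
    // least 10, a requested duration of 10ms yields the exact setting 10x1ms.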
1017    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
1018        //  0         |-------------->|<---------------|
1019        //  |---------+---------------+----------------+---->
1020        //  |---------^               |                |
1021        //  | best positive slack     |                |
1022        //  |-------------------------^ duration       |
1023        //  |------------------------------------------^ best negative slack.
1024        let mut best_positive_slack = TimerDuration::zero();
1025        let mut best_negative_slack = TimerDuration::max();
1026
1027        if self.max_ticks == 0 {
1028            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
1029        }
1030        let duration_slack: TimerDuration = duration.into();
1031
1032        for res1 in self.resolutions.iter() {
1033            let smallest_unit = TimerDuration::new(*res1, 1);
1034            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
1035
1036            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
1037            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
1038
1039            if smallest_slack_larger_than_duration {
1040                if duration_slack == TimerDuration::zero() {
1041                    best_negative_slack = TimerDuration::zero();
1042                } else if smallest_unit < best_negative_slack {
1043                    best_negative_slack = smallest_unit;
1044                }
1045            }
1046            if largest_slack_smaller_than_duration {
1047                if max_tick_at_res > best_positive_slack
1048                    || best_positive_slack == TimerDuration::zero()
1049                {
1050                    best_positive_slack = max_tick_at_res;
1051                }
1052            }
1053
1054            // "Regular" case.
1055            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
1056                // Check whether duration divides evenly into the available slack options
1057                // for this resolution.  If it does, then that is the slack we're looking for.
1058                let q = duration_slack / smallest_unit;
1059                let d = smallest_unit * q;
1060                if d == duration_slack {
1061                    // Exact match, we can return right now.
1062                    return d;
1063                } else {
1064                    // Not an exact match, so q ticks is before, but q+1 is after.
1065                    if d > best_positive_slack {
1066                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
1067                    }
1068                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
1069                    if d_plus < best_negative_slack {
1070                        best_negative_slack = d_plus;
1071                    }
1072                }
1073            }
1074        }
1075
1076        let p_slack = duration - best_positive_slack.duration();
1077        let n_slack = best_negative_slack.duration() - duration;
1078
1079        // If the closest approximation is 0ns, then we can not advance time, so we reject it.
1080        // Otherwise pick the smallest slack.  Note that when we pick the best positive slack,
1081        // we will wake *before* the actual deadline.  In multi-resolution counters, this enables
1082        // us to pick a finer count in the next go.
1083        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
1084            best_positive_slack
1085        } else {
1086            best_negative_slack
1087        };
1088        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
1089        assert!(
1090            ret.duration().into_nanos() >= 0,
1091            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
1092            ret,
1093            format_duration(p_slack),
1094            format_duration(n_slack),
1095            format_duration(duration),
1096            best_positive_slack,
1097            best_negative_slack,
1098            duration_slack,
1099            p_slack != zx::BootDuration::ZERO,
1100            p_slack,
1101            zx::BootDuration::ZERO,
1102        );
1103        ret
1104    }
1105}
1106
1107async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
1108    debug!("get_timer_properties: requesting timer properties.");
1109    hrtimer.get_timer_properties().await
1110}
1111
/// The state of a single hardware timer that we must keep track of.
1113struct TimerState {
1114    // The task waiting for the proximate timer to expire.
1115    task: fasync::Task<()>,
1116    // The deadline that the above task is waiting for.
1117    deadline: fasync::BootInstant,
1118}
1119
/// The command loop for timer interaction.  All changes to the wake alarm device programming
/// come in the form of commands through `cmds`.
///
/// Args:
/// - `scope`: the scope used to spawn asynchronous sub-tasks.
/// - `snd`: the send end of `cmds` below; a clone is given to each spawned sub-task.
/// - `cmds`: the input queue of alarm-related commands.
1126/// - `timer_proxy`: the FIDL API proxy for interacting with the hardware device.
1127/// - `inspect`: the inspect node to record loop info into.
1128async fn wake_timer_loop(
1129    scope: fasync::ScopeHandle,
1130    snd: mpsc::Sender<Cmd>,
1131    mut cmds: mpsc::Receiver<Cmd>,
1132    timer_proxy: Box<dyn TimerOps>,
1133    inspect: finspect::Node,
1134) {
1135    debug!("wake_timer_loop: started");
1136
1137    let mut timers = Timers::new();
1138    let timer_config = get_timer_properties(&timer_proxy).await;
1139
1140    // Keeps the currently executing HrTimer closure.  This is not read from, but
1141    // keeps the timer task active.
1142    #[allow(clippy::collection_is_never_read)]
1143    let mut hrtimer_status: Option<TimerState> = None;
1144
1145    // Initialize inspect properties. This must be done only once.
1146    //
1147    // Take note that these properties are updated when the `cmds` loop runs.
1148    // This means that repeated reads while no `cmds` activity occurs will return
1149    // old readings.  This is to ensure a consistent ability to replay the last
1150    // loop run if needed.
1151    let now_prop = inspect.create_int("now_ns", 0);
1152    let now_formatted_prop = inspect.create_string("now_formatted", "");
1153    let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1154    let pending_timers_prop = inspect.create_string("pending_timers", "");
1155    let deadline_histogram_prop = inspect.create_int_exponential_histogram(
1156        "requested_deadlines_ns",
1157        finspect::ExponentialHistogramParams {
1158            floor: 0,
1159            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1160            // Allows capturing deadlines up to dozens of days.
1161            step_multiplier: 10,
1162            buckets: 16,
1163        },
1164    );
1165    let slack_histogram_prop = inspect.create_int_exponential_histogram(
1166        "slack_ns",
1167        finspect::ExponentialHistogramParams {
1168            floor: 0,
1169            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1170            step_multiplier: 10,
1171            buckets: 16,
1172        },
1173    );
1174    let schedule_delay_prop = inspect.create_int_exponential_histogram(
1175        "schedule_delay_ns",
1176        finspect::ExponentialHistogramParams {
1177            floor: 0,
1178            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1179            step_multiplier: 10,
1180            buckets: 16,
1181        },
1182    );
1183    // Internals of what was programmed into the wake alarms hardware.
1184    let hw_node = inspect.create_child("hardware");
1185    let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1186    let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1187
1188    while let Some(cmd) = cmds.next().await {
1189        trace::duration!(c"alarms", c"Cmd");
1190        // Use a consistent notion of "now" across commands.
1191        let now = fasync::BootInstant::now();
1192        now_prop.set(now.into_nanos());
1193        trace::instant!(c"alarms", c"wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1194        match cmd {
1195            Cmd::Start { cid, deadline, mode, alarm_id, responder } => {
1196                trace::duration!(c"alarms", c"Cmd::Start");
1197                fuchsia_trace::flow_step!(c"alarms", c"hrtimer_lifecycle", as_trace_id(&alarm_id));
1198                // NOTE: hold keep_alive until all work is done.
1199                debug!(
1200                    "wake_timer_loop: START alarm_id: \"{}\", cid: {:?}\n\tdeadline: {}\n\tnow:      {}",
1201                    alarm_id,
1202                    cid,
1203                    format_timer(deadline.into()),
1204                    format_timer(now.into()),
1205                );
1206
1207                defer! {
1208                    // This is the only option that requires further action.
1209                    if let fta::SetMode::NotifySetupDone(setup_done) = mode {
1210                        // Must signal once the setup is completed.
1211                        signal(&setup_done);
1212                        debug!("wake_timer_loop: START: setup_done signaled");
1213                    };
1214                }
1215                deadline_histogram_prop.insert((deadline - now).into_nanos());
1216                if Timers::expired(now, deadline) {
1217                    trace::duration!(c"alarms", c"Cmd::Start:immediate");
1218                    fuchsia_trace::flow_step!(
1219                        c"alarms",
1220                        c"hrtimer_lifecycle",
1221                        as_trace_id(&alarm_id)
1222                    );
                    // A timer set for now or a time in the past expires right away.
1224                    let (_lease, keep_alive) = zx::EventPair::create();
1225                    debug!(
1226                        "[{}] wake_timer_loop: bogus lease {:?}",
1227                        line!(),
1228                        &keep_alive.get_koid().unwrap()
1229                    );
1230
1231                    if let Err(e) = responder
1232                        .send(&alarm_id, Ok(keep_alive))
1233                        .expect("responder is always present")
1234                    {
1235                        error!(
1236                            "wake_timer_loop: cid: {cid:?}, alarm: {alarm_id}: could not notify, dropping: {e}",
1237                        );
1238                    } else {
1239                        debug!(
1240                            "wake_timer_loop: cid: {cid:?}, alarm: {alarm_id}: EXPIRED IMMEDIATELY\n\tdeadline({}) <= now({})",
1241                            format_timer(deadline.into()),
1242                            format_timer(now.into()),
1243                        )
1244                    }
1245                } else {
1246                    trace::duration!(c"alarms", c"Cmd::Start:regular");
1247                    fuchsia_trace::flow_step!(
1248                        c"alarms",
1249                        c"hrtimer_lifecycle",
1250                        as_trace_id(&alarm_id)
1251                    );
1252                    // A timer scheduled for the future gets inserted into the timer heap.
1253                    let was_empty = timers.is_empty();
1254
1255                    let deadline_before = timers.peek_deadline();
1256                    timers.push(TimerNode::new(deadline, alarm_id, cid, responder));
1257                    let deadline_after = timers.peek_deadline();
1258
1259                    let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1260                    let needs_cancel = !was_empty && deadline_changed;
1261                    let needs_reschedule = was_empty || deadline_changed;
1262
1263                    if needs_reschedule {
1264                        // Always schedule the proximate deadline.
1265                        let schedulable_deadline = deadline_after.unwrap_or(deadline);
1266                        if needs_cancel {
1267                            log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1268                        }
1269                        hrtimer_status = Some(
1270                            schedule_hrtimer(
1271                                scope.clone(),
1272                                now,
1273                                &timer_proxy,
1274                                schedulable_deadline,
1275                                snd.clone(),
1276                                &timer_config,
1277                                &schedule_delay_prop,
1278                            )
1279                            .await,
1280                        );
1281                    }
1282                }
1283            }
1284            Cmd::StopById { timer_id, done } => {
1285                trace::duration!(c"alarms", c"Cmd::StopById", "alarm_id" => &timer_id.alarm_id[..]);
1286                fuchsia_trace::flow_step!(
1287                    c"alarms",
1288                    c"hrtimer_lifecycle",
1289                    as_trace_id(&timer_id.alarm_id)
1290                );
1291                debug!("wake_timer_loop: STOP timer: {}", timer_id);
1292                let deadline_before = timers.peek_deadline();
1293
1294                if let Some(timer_node) = timers.remove_by_id(&timer_id) {
1295                    let deadline_after = timers.peek_deadline();
1296
1297                    if let Some(res) = timer_node
1298                        .responder
1299                        .send(&timer_node.alarm_id, Err(fta::WakeAlarmsError::Dropped))
1300                    {
1301                        // We must reply to the responder to keep the connection open.
1302                        res.expect("infallible");
1303                    }
1304                    if is_deadline_changed(deadline_before, deadline_after) {
1305                        log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1306                    }
1307                    if let Some(deadline) = deadline_after {
1308                        // Reschedule the hardware timer if the removed timer is the earliest one,
1309                        // and another one exists.
1310                        let new_timer_state = schedule_hrtimer(
1311                            scope.clone(),
1312                            now,
1313                            &timer_proxy,
1314                            deadline,
1315                            snd.clone(),
1316                            &timer_config,
1317                            &schedule_delay_prop,
1318                        )
1319                        .await;
1320                        let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1321                        if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1322                            // Allow the task to complete, I suppose.
1323                            task.await;
1324                        }
1325                    } else {
1326                        // No next timer, clean up the hrtimer status.
1327                        hrtimer_status = None;
1328                    }
1329                } else {
1330                    debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1331                }
1332                signal(&done);
1333            }
1334            Cmd::Alarm { expired_deadline, keep_alive } => {
1335                trace::duration!(c"alarms", c"Cmd::Alarm");
1336                // Expire all eligible timers, based on "now".  This is because
1337                // we may have woken up earlier than the actual deadline. This
1338                // happens for example if the timer can not make the actual
1339                // deadline and needs to be re-programmed.
1340                debug!(
1341                    "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1342                    format_timer(expired_deadline.into()),
1343                    keep_alive.get_koid().unwrap(),
1344                );
1345                let expired_count =
1346                    notify_all(&mut timers, &keep_alive, now, &slack_histogram_prop)
1347                        .expect("notification succeeds");
1348                if expired_count == 0 {
1349                    // This could be a resolution switch, or a straggler notification.
1350                    // Either way, the hardware timer is still ticking, cancel it.
1351                    debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1352                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1353                }
1354                // There is a timer to reschedule, do that now.
1355                hrtimer_status = match timers.peek_deadline() {
1356                    None => None,
1357                    Some(deadline) => Some(
1358                        schedule_hrtimer(
1359                            scope.clone(),
1360                            now,
1361                            &timer_proxy,
1362                            deadline,
1363                            snd.clone(),
1364                            &timer_config,
1365                            &schedule_delay_prop,
1366                        )
1367                        .await,
1368                    ),
1369                }
1370            }
1371            Cmd::AlarmFidlError { expired_deadline, error } => {
1372                trace::duration!(c"alarms", c"Cmd::AlarmFidlError");
1373                // We do not have a wake lease, so the system may sleep before
1374                // we get to schedule a new timer. We have no way to avoid it
1375                // today.
1376                warn!(
1377                    "wake_timer_loop: FIDL error: {:?}, deadline: {}, now: {}",
1378                    error,
1379                    format_timer(expired_deadline.into()),
1380                    format_timer(now.into()),
1381                );
1382                // Manufacture a stand-in lease so the notification path below can proceed.
1383                // TODO: consider using an Option instead.
1384                let (_dummy_lease, peer) = zx::EventPair::create();
1385                debug!("XXX: [{}] bogus lease: {:?}", line!(), &peer.get_koid().unwrap());
1386                notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1387                    .expect("notification succeeds");
1388                hrtimer_status = match timers.peek_deadline() {
1389                    None => None, // No remaining timers, nothing to schedule.
1390                    Some(deadline) => Some(
1391                        schedule_hrtimer(
1392                            scope.clone(),
1393                            now,
1394                            &timer_proxy,
1395                            deadline,
1396                            snd.clone(),
1397                            &timer_config,
1398                            &schedule_delay_prop,
1399                        )
1400                        .await,
1401                    ),
1402                }
1403            }
1404            Cmd::AlarmDriverError { expired_deadline, error } => {
1405                trace::duration!(c"alarms", c"Cmd::AlarmDriverError");
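                // As in the FIDL error branch above, manufacture a stand-in lease so
                // that notify_all below can proceed; no real wake lease is available.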
1406                let (_dummy_lease, peer) = zx::EventPair::create();
1407                debug!("XXX: [{}] bogus lease: {:?}", line!(), &peer.get_koid().unwrap());
1408                notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1409                    .expect("notification succeeds");
1410                match error {
1411                    fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1412                        // Nothing to do here; cancellation is handled in the Stop code.
1413                        debug!(
1414                            "wake_timer_loop: CANCELED timer at deadline: {}",
1415                            format_timer(expired_deadline.into())
1416                        );
1417                    }
1418                    _ => {
1419                        error!(
1420                            "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}",
1421                            error,
1422                            format_timer(expired_deadline.into()),
1423                            format_timer(now.into()),
1424                        );
1425                        // We do not have a wake lease, so the system may sleep before
1426                        // we get to schedule a new timer. We have no way to avoid it
1427                        // today.
1428                        hrtimer_status = match timers.peek_deadline() {
1429                            None => None,
1430                            Some(deadline) => Some(
1431                                schedule_hrtimer(
1432                                    scope.clone(),
1433                                    now,
1434                                    &timer_proxy,
1435                                    deadline,
1436                                    snd.clone(),
1437                                    &timer_config,
1438                                    &schedule_delay_prop,
1439                                )
1440                                .await,
1441                            ),
1442                        }
1443                    }
1444                }
1445            }
1446        }
1447
1448        {
1449            // Print and record diagnostics after each iteration, record the
1450            // duration for performance awareness.  Note that iterations happen
1451            // only occasionally, so these stats can remain unchanged for a long
1452            // time.
1453            trace::duration!(c"timekeeper", c"inspect");
1454            let now_formatted = format_timer(now.into());
1455            debug!("wake_timer_loop: now:                             {}", &now_formatted);
1456            now_formatted_prop.set(&now_formatted);
1457
1458            let pending_timers_count: u64 =
1459                timers.timer_count().try_into().expect("always convertible");
1460            debug!("wake_timer_loop: currently pending timer count:   {}", pending_timers_count);
1461            pending_timers_count_prop.set(pending_timers_count);
1462
1463            let pending_timers = format!("{}", timers);
1464            debug!("wake_timer_loop: currently pending timers:        {}", &timers);
1465            pending_timers_prop.set(&pending_timers);
1466
1467            let current_deadline: String = hrtimer_status
1468                .as_ref()
1469                .map(|s| format!("{}", format_timer(s.deadline.into())))
1470                .unwrap_or_else(|| "(none)".into());
1471            debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
1472            current_hw_deadline_prop.set(&current_deadline);
1473
1474            let remaining_duration_until_alarm = hrtimer_status
1475                .as_ref()
1476                .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1477                .unwrap_or_else(|| "(none)".into());
1478            debug!(
1479                "wake_timer_loop: remaining duration until alarm:  {}",
1480                &remaining_duration_until_alarm
1481            );
1482            remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1483            debug!("---");
1484        }
1485    }
1486
1487    debug!("wake_timer_loop: exiting. This is unlikely in prod code.");
1488}
1489
1490/// Schedules a wake alarm.
1491///
1492/// Args:
1493/// - `scope`: used to spawn async tasks.
1494/// - `now`: the time instant to treat as the current time.
1495/// - `hrtimer`: the proxy for the hrtimer device driver.
1496/// - `deadline`: the time instant in the future at which the alarm should fire.
1497/// - `command_send`: the sender channel to use when the timer expires.
1498/// - `timer_config`: a configuration of the hardware timer showing supported resolutions and
1499///   max tick value.
1500/// - `schedule_delay_histogram`: records how long scheduling the wake alarm took.
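///
/// A minimal call sketch (illustrative only; the variable names below stand in
/// for the caller's loop state and are not defined here):
///
/// ```ignore
/// let new_timer_state = schedule_hrtimer(
///     scope.clone(),
///     fasync::BootInstant::now(),
///     &timer_proxy,
///     deadline,
///     snd.clone(),
///     &timer_config,
///     &schedule_delay_prop,
/// )
/// .await;
/// hrtimer_status = Some(new_timer_state);
/// ```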
1501async fn schedule_hrtimer(
1502    scope: fasync::ScopeHandle,
1503    now: fasync::BootInstant,
1504    hrtimer: &Box<dyn TimerOps>,
1505    deadline: fasync::BootInstant,
1506    mut command_send: mpsc::Sender<Cmd>,
1507    timer_config: &TimerConfig,
1508    schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
1509) -> TimerState {
1510    let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
1511    trace::duration!(c"alarms", c"schedule_hrtimer", "timeout" => timeout.into_nanos());
1512    // When signaled, the hrtimer has been scheduled.
1513    let hrtimer_scheduled = zx::Event::create();
1514
1515    debug!(
1516        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
1517        format_timer(now.into()),
1518        format_timer(deadline.into()),
1519        format_duration(timeout),
1520    );
1521
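    // Pick a supported resolution and tick count that best approximate the
    // requested timeout.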
1522    let slack = timer_config.pick_setting(timeout);
1523
1524    let resolution_nanos = slack.resolution.into_nanos();
1525    let ticks = slack.ticks();
1526    trace::instant!(c"alarms", c"hrtimer:programmed",
1527        trace::Scope::Process,
1528        "resolution_ns" => resolution_nanos,
1529        "ticks" => ticks
1530    );
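    // Ask the driver to program the hardware timer and wait for it to fire;
    // `hrtimer_scheduled` is signaled once the timer has been set up.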
1531    let start_and_wait_fut = hrtimer.start_and_wait(
1532        timer_config.id,
1533        &ffhh::Resolution::Duration(resolution_nanos),
1534        ticks,
1535        clone_handle(&hrtimer_scheduled),
1536    );
1537    let hrtimer_scheduled_if_error = clone_handle(&hrtimer_scheduled);
1538    let hrtimer_task = scope.spawn_local(async move {
1539        debug!("hrtimer_task: waiting for hrtimer driver response");
1540        trace::instant!(c"alarms", c"hrtimer:started", trace::Scope::Process);
1541        let response = start_and_wait_fut.await;
1542        trace::instant!(c"alarms", c"hrtimer:response", trace::Scope::Process);
1543        match response {
1544            Err(TimerOpsError::Fidl(e)) => {
1545                defer! {
1546                    // Allow waiters on hrtimer_scheduled to proceed anyway.
1547                    signal(&hrtimer_scheduled_if_error);
1548                }
1549                trace::instant!(c"alarms", c"hrtimer:response:fidl_error", trace::Scope::Process);
1550                warn!("hrtimer_task: hrtimer FIDL error: {:?}", e);
1551                command_send
1552                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
1553                    .unwrap();
1554                // BAD: no wake lease is available to keep the system awake here.
1555            }
1556            Err(TimerOpsError::Driver(e)) => {
1557                defer! {
1558                    // This should be idempotent if the error occurs after
1559                    // the timer was scheduled.
1560                    signal(&hrtimer_scheduled_if_error);
1561                }
1562                let driver_error_str = format!("{:?}", e);
1563                trace::instant!(c"alarms", c"hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
1564                warn!("schedule_hrtimer: hrtimer driver error: {:?}", e);
1565                command_send
1566                    .start_send(Cmd::AlarmDriverError { expired_deadline: now, error: e })
1567                    .unwrap();
1568                // BAD: no wake lease is available to keep the system awake here.
1569            }
1570            Ok(keep_alive) => {
1571                trace::instant!(c"alarms", c"hrtimer:response:alarm", trace::Scope::Process);
1572                debug!("hrtimer: got alarm response: {:?}", keep_alive);
1573                // May trigger sooner than the deadline.
1574                command_send
1575                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
1576                    .unwrap();
1577            }
1578        }
1579        debug!("hrtimer_task: exiting task.");
1580        trace::instant!(c"alarms", c"hrtimer:task_exit", trace::Scope::Process);
1581    }).into();
1582    debug!("schedule_hrtimer: waiting for event to be signaled");
1583
1584    // We must wait here to ensure that the wake alarm has been scheduled.
1585    log_long_op!(wait_signaled(&hrtimer_scheduled));
1586
1587    let now_after_signaled = fasync::BootInstant::now();
1588    let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
1589    if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
1590        trace::duration!(c"alarms", c"schedule_hrtimer:unusual_duration",
1591            "duration" => duration_until_scheduled.into_nanos());
1592        warn!(
1593            "unusual duration until hrtimer scheduled: {}",
1594            format_duration(duration_until_scheduled)
1595        );
1596    }
1597    schedule_delay_histogram.insert(duration_until_scheduled.into_nanos());
1598    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
1599    TimerState { task: hrtimer_task, deadline }
1600}
1601
1602/// Notify all `timers` that `reference_instant` has been reached.
1603///
1604/// The notified `timers` are removed from the list of timers to notify.
1605///
1606/// Args:
1607/// - `timers`: the collection of currently available timers.
1608/// - `lease_prototype`: an EventPair used as a wake lease.
1609/// - `reference_instant`: the time instant used as a reference for alarm notification.
1610///   All timers with deadlines at or before this instant are notified and removed.
/// - `unusual_slack_histogram`: records how late past-deadline notifications were,
///   in nanoseconds.
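///
/// A minimal call sketch (illustrative; `timers`, `keep_alive`, `now`, and
/// `slack_histogram_prop` stand in for the caller's state):
///
/// ```ignore
/// let expired_count =
///     notify_all(&mut timers, &keep_alive, now, &slack_histogram_prop)
///         .expect("notification succeeds");
/// ```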
1611fn notify_all(
1612    timers: &mut Timers,
1613    lease_prototype: &zx::EventPair,
1614    reference_instant: fasync::BootInstant,
1615    unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1616) -> Result<usize> {
1617    trace::duration!(c"alarms", c"notify_all");
1618    let now = fasync::BootInstant::now();
1619    let mut expired = 0;
1620    while let Some(timer_node) = timers.maybe_expire_earliest(reference_instant) {
1621        expired += 1;
1622        // Compute how much later than requested the notification happened.
1623        let deadline = *timer_node.get_deadline();
1624        let alarm_id = timer_node.get_alarm_id().to_string();
1625        trace::duration!(c"alarms", c"notify_all:notified", "alarm_id" => &*alarm_id);
1626        fuchsia_trace::flow_step!(c"alarms", c"hrtimer_lifecycle", as_trace_id(&alarm_id));
1627        let cid = timer_node.get_cid().clone();
1628        let slack: zx::BootDuration = deadline - now;
1629        if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1630            trace::duration!(c"alarms", c"schedule_hrtimer:unusual_slack", "slack" => slack.into_nanos());
1631            // This alarm triggered noticeably later than it should have.
1632            warn!(
1633                "alarm id: {} had an unusually large slack: {}",
1634                alarm_id,
1635                format_duration(slack)
1636            );
1637        }
1638        if slack < zx::BootDuration::ZERO {
1639            unusual_slack_histogram.insert(-slack.into_nanos());
1640        }
1641        debug!(
1642            concat!(
1643                "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tcid: {:?},\n\t",
1644                "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1645            ),
1646            alarm_id,
1647            format_timer(deadline.into()),
1648            cid,
1649            format_timer(reference_instant.into()),
1650            format_timer(now.into()),
1651            format_duration(slack),
1652        );
1653        let lease = clone_handle(lease_prototype);
1654        trace::instant!(c"alarms", c"notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "cid" => cid);
1655        if let Some(Err(e)) = timer_node.responder.send(&timer_node.alarm_id, Ok(lease)) {
1656            error!("could not signal responder: {:?}", e);
1657        }
1658        trace::instant!(c"alarms", c"notified", trace::Scope::Process);
1659    }
1660    trace::instant!(c"alarms", c"notify", trace::Scope::Process, "expired_count" => expired);
1661    debug!("notify_all: expired count: {}", expired);
1662    Ok(expired)
1663    // Note that a new hardware timer has not been scheduled at this point.
1664}
1665
1666/// The hrtimer driver service directory.  hrtimer device nodes appear as randomly
1667/// named files in this directory and are expected to come and go.
1668const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1669
1670/// Connects to the high resolution timer device driver.
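///
/// A usage sketch, assuming the caller can propagate errors:
///
/// ```ignore
/// let hrtimer_proxy = connect_to_hrtimer_async().await?;
/// ```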
1671pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1672    debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1673    let dir =
1674        fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1675            .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1676    let path = device_watcher::watch_for_files(&dir)
1677        .await
1678        .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1679        .try_next()
1680        .await
1681        .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1682    let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1683    let path = path
1684        .to_str()
1685        .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1686    connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1687        .context("Failed to connect to built-in service")
1688}
1689
1690#[cfg(test)]
1691mod tests {
1692    use super::*;
1693    use assert_matches::assert_matches;
1694    use diagnostics_assertions::{assert_data_tree, AnyProperty};
1695    use fuchsia_async::TestExecutor;
1696    use futures::select;
1697    use test_case::test_case;
1698    use test_util::{assert_gt, assert_lt};
1699
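    // Returns the peer end of a freshly created event pair, standing in for a
    // wake lease token in these tests.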
1700    fn fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1701        let (_lease, peer) = zx::EventPair::create();
1702        peer
1703    }
1704
1705    #[test]
1706    fn timer_duration_no_overflow() {
1707        let duration1 = TimerDuration {
1708            resolution: zx::BootDuration::from_seconds(100_000_000),
1709            ticks: u64::MAX,
1710        };
1711        let duration2 = TimerDuration {
1712            resolution: zx::BootDuration::from_seconds(110_000_000),
1713            ticks: u64::MAX,
1714        };
1715        assert_eq!(duration1, duration1);
1716        assert_eq!(duration2, duration2);
1717
1718        assert_lt!(duration1, duration2);
1719        assert_gt!(duration2, duration1);
1720    }
1721
1722    #[test_case(
1723        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1724        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1725    )]
1726    #[test_case(
1727        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
1728        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1729    )]
1730    #[test_case(
1731        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1732        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
1733    )]
1734    #[test_case(
1735        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
1736        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
1737    )]
1738    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
1739        assert_eq!(one, other);
1740    }
1741
1742    #[test_case(
1743        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1744        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
1745    )]
1746    #[test_case(
1747        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1748        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1749    )]
1750    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
1751        assert_lt!(one, other);
1752    }
1753
1754    #[test_case(
1755        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
1756        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1757    )]
1758    #[test_case(
1759        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1760        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1761    )]
1762    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
1763        assert_gt!(one, other);
1764    }
1765
1766    #[test_case(
1767        vec![zx::BootDuration::from_nanos(1)],
1768        100,
1769        zx::BootDuration::from_nanos(0),
1770        TimerDuration::new(zx::BootDuration::from_nanos(1), 0) ; "Exact at 0x1ns"
1771    )]
1772    #[test_case(
1773        vec![zx::BootDuration::from_nanos(1)],
1774        100,
1775        zx::BootDuration::from_nanos(50),
1776        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
1777    )]
1778    #[test_case(
1779        vec![zx::BootDuration::from_nanos(2)],
1780        100,
1781        zx::BootDuration::from_nanos(50),
1782        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
1783    )]
1784    #[test_case(
1785        vec![zx::BootDuration::from_nanos(3)],
1786        100,
1787        zx::BootDuration::from_nanos(50),
1788        // The closest duration is 51ns.
1789        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
1790    )]
1791    #[test_case(
1792        vec![
1793            zx::BootDuration::from_nanos(3),
1794            zx::BootDuration::from_nanos(4)
1795        ],
1796        100,
1797        zx::BootDuration::from_nanos(50),
1798        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
1799    )]
1800    #[test_case(
1801        vec![
1802            zx::BootDuration::from_nanos(1000),
1803        ],
1804        100,
1805        zx::BootDuration::from_nanos(50),
1806        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
1807        "950ns negative slack is the best we can do"
1808    )]
1809    #[test_case(
1810        vec![
1811            zx::BootDuration::from_nanos(1),
1812        ],
1813        10,
1814        zx::BootDuration::from_nanos(50),
1815        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
1816        "10ns positive slack is the best we can do"
1817    )]
1818    #[test_case(
1819        vec![
1820            zx::BootDuration::from_millis(1),
1821            zx::BootDuration::from_micros(100),
1822            zx::BootDuration::from_micros(10),
1823            zx::BootDuration::from_micros(1),
1824        ],
1825        20,  // Make only one of the resolutions above match.
1826        zx::BootDuration::from_micros(150),
1827        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
1828        "Realistic case with resolutions from driver, should be 15us"
1829    )]
1830    #[test_case(
1831        vec![
1832            zx::BootDuration::from_millis(1),
1833            zx::BootDuration::from_micros(100),
1834            zx::BootDuration::from_micros(10),
1835            zx::BootDuration::from_micros(1),
1836        ],
1837        2000,  // Make only one of the resolutions above match.
1838        zx::BootDuration::from_micros(6000),
1839        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
1840        "Coarser exact unit wins"
1841    )]
1842    fn test_pick_setting(
1843        resolutions: Vec<zx::BootDuration>,
1844        max_ticks: u64,
1845        duration: zx::BootDuration,
1846        expected: TimerDuration,
1847    ) {
1848        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
1849        let actual = config.pick_setting(duration);
1850
1851        // .eq() does not work here, since we do not just require that the values
1852        // be equal, but also that the same resolution is used in both.
1853        assert_slack_eq(expected, actual);
1854    }
1855
1856    // TimerDuration assertion with human-friendly output in case of an error.
1857    fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
1858        let slack = expected.duration() - actual.duration();
1859        assert_eq!(
1860            actual.resolution(),
1861            expected.resolution(),
1862            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1863            expected,
1864            format_duration(expected.duration()),
1865            actual,
1866            format_duration(actual.duration()),
1867            format_duration(slack)
1868        );
1869        assert_eq!(
1870            actual.ticks(),
1871            expected.ticks(),
1872            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1873            expected,
1874            format_duration(expected.duration()),
1875            actual,
1876            format_duration(actual.duration()),
1877            format_duration(slack)
1878        );
1879    }
1880
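    // Commands understood by the fake hrtimer driver below, used to configure
    // the timer properties it reports and the wake lease it hands out.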
1881    #[derive(Debug)]
1882    enum FakeCmd {
1883        SetProperties {
1884            resolutions: Vec<zx::BootDuration>,
1885            max_ticks: i64,
1886            keep_alive: zx::EventPair,
1887            done: zx::Event,
1888        },
1889    }
1890
1891    use std::cell::RefCell;
1892    use std::rc::Rc;
1893
1894    // A fake that emulates some aspects of the hrtimer driver.
1895    //
1896    // Specifically, it can be configured with different resolutions, and will
1897    // panic if any waiting method is called twice in succession without
1898    // canceling the timer in between.
1899    fn fake_hrtimer_connection(
1900        scope: fasync::ScopeHandle,
1901        rcv: mpsc::Receiver<FakeCmd>,
1902    ) -> ffhh::DeviceProxy {
1903        debug!("fake_hrtimer_connection: entry.");
1904        let (hrtimer, mut stream) =
1905            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
1906        scope.clone().spawn_local(async move {
1907            let mut rcv = rcv.fuse();
1908            let timer_properties = Rc::new(RefCell::new(None));
1909            let wake_lease = Rc::new(RefCell::new(None));
1910
1911            // Set to true when the hardware timer is supposed to be running.
1912            // The hardware timer may not be reprogrammed without canceling it
1913            // first; make sure the tests fail the same way production would.
1914            let timer_running = Rc::new(RefCell::new(false));
1915
1916            loop {
1917                let timer_properties = timer_properties.clone();
1918                let wake_lease = wake_lease.clone();
1919                select! {
1920                    cmd = rcv.next() => {
1921                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
1922                        match cmd {
1923                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
1924                                let mut timer_props = vec![];
1925                                for v in 0..10 {
1926                                    timer_props.push(ffhh::TimerProperties {
1927                                        supported_resolutions: Some(
1928                                            resolutions.iter()
1929                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
1930                                        max_ticks: Some(max_ticks.try_into().unwrap()),
1931                                        // Indicates that the start_and_wait method is supported.
1932                                        supports_wait: Some(true),
1933                                        id: Some(v),
1934                                        ..Default::default()
1935                                        },
1936                                    );
1937                                }
1938                                *timer_properties.borrow_mut() = Some(timer_props);
1939                                *wake_lease.borrow_mut() = Some(keep_alive);
1940                                debug!("set timer properties to: {:?}", timer_properties);
1941                                signal(&done);
1942                            }
1943                            e => {
1944                                panic!("unrecognized command: {:?}", e);
1945                            }
1946                        }
1947                        // Set some responses if we have them.
1948                    },
1949                    event = stream.next() => {
1950                        debug!("fake_hrtimer_connection: event: {:?}", event);
1951                        if let Some(Ok(event)) = event {
1952                            match event {
1953                                ffhh::DeviceRequest::Start { responder, .. } => {
1954                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1955                                    *timer_running.borrow_mut() = true;
1956                                    responder.send(Ok(())).expect("");
1957                                }
1958                                ffhh::DeviceRequest::Stop { responder, .. } => {
1959                                    *timer_running.borrow_mut() = false;
1960                                    responder.send(Ok(())).expect("");
1961                                }
1962                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
1963                                    responder.send(Ok(1)).expect("");
1964                                }
1965                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
1966                                    responder.send(Ok(())).expect("");
1967                                }
1968                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
1969                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1970                                    *timer_running.borrow_mut() = true;
1971                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
1972                                    let ticks: i64 = ticks.try_into().unwrap();
1973                                    let sleep_duration = zx::BootDuration::from_nanos(ticks * match resolution {
1974                                        ffhh::Resolution::Duration(e) => e,
1975                                        _ => {
1976                                            error!("resolution has an unexpected value");
1977                                            1
1978                                        }
1979                                    });
1980                                    let timer_running_clone = timer_running.clone();
1981                                    scope.spawn_local(async move {
1982                                        // Signaling the setup event allows the client to proceed
1983                                        // with post-scheduling work.
1984                                        signal(&setup_event);
1985
1986                                        // Respond after the requested sleep time. In tests this will
1987                                        // be sleeping in fake time.
1988                                        fasync::Timer::new(sleep_duration).await;
1989                                        *timer_running_clone.borrow_mut() = false;
1990                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
1991                                    });
1992                                }
1993                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
1994                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1995                                    *timer_running.borrow_mut() = true;
1996                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
1997                                }
1998                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
1999                                    if (*timer_properties).borrow().is_none() {
2000                                        error!("timer_properties is empty, this is not what you want!");
2001                                    }
2002                                    responder
2003                                        .send(ffhh::Properties {
2004                                            timers_properties: (*timer_properties).borrow().clone(),
2005                                            ..Default::default()
2006                                        })
2007                                        .expect("");
2008                                }
2009                                ffhh::DeviceRequest::ReadTimer { responder, .. } => {
2010                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2011                                }
2012                                ffhh::DeviceRequest::ReadClock { responder, .. } => {
2013                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2014                                }
2015                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
2016                            }
2017                        }
2018                    },
2019                }
2020            }
2021        });
2022        hrtimer
2023    }
2024
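    // Shared fixtures for the tests below: a proxy to the wake alarms service
    // backed by the fake hrtimer, plus the scope and command channel that keep
    // the fakes alive for the duration of a test.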
2025    struct TestContext {
2026        wake_proxy: fta::WakeAlarmsProxy,
2027        _scope: fasync::Scope,
2028        _cmd_tx: mpsc::Sender<FakeCmd>,
2029    }
2030
2031    impl TestContext {
2032        async fn new() -> Self {
2033            TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(0)).await;
2034
2035            let scope = fasync::Scope::new();
2036            let (mut cmd_tx, wake_proxy) = {
2037                let (tx, rx) = mpsc::channel::<FakeCmd>(0);
2038                let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);
2039
2040                let inspector = finspect::component::inspector();
2041                let alarms = Rc::new(Loop::new(
2042                    scope.to_handle(),
2043                    hrtimer_proxy,
2044                    inspector.root().create_child("test"),
2045                ));
2046
2047                let (proxy, stream) =
2048                    fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2049                scope.spawn_local(async move {
2050                    serve(alarms, stream).await;
2051                });
2052                (tx, proxy)
2053            };
2054
2055            let (_wake_lease, peer) = zx::EventPair::create();
2056            let done = zx::Event::create();
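            // Configure the fake hrtimer with a single 1ns resolution and a
            // maximum of 100 ticks.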
2057            cmd_tx
2058                .start_send(FakeCmd::SetProperties {
2059                    resolutions: vec![zx::BootDuration::from_nanos(1)],
2060                    max_ticks: 100,
2061                    keep_alive: peer,
2062                    done: done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2063                })
2064                .unwrap();
2065
2066            // Wait until hrtimer configuration has completed.
2067            assert_matches!(fasync::OnSignals::new(done, zx::Signals::EVENT_SIGNALED).await, Ok(_));
2068
2069            Self { wake_proxy, _scope: scope, _cmd_tx: cmd_tx }
2070        }
2071    }
2072
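    // Every test must finish with no timers pending in the fake executor;
    // verify that on teardown.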
2073    impl Drop for TestContext {
2074        fn drop(&mut self) {
2075            assert_matches!(TestExecutor::next_timer(), None, "Unexpected lingering timers");
2076        }
2077    }
2078
2079    #[fuchsia::test(allow_stalls = false)]
2080    async fn test_basic_timed_wait() {
2081        let ctx = TestContext::new().await;
2082
2083        let deadline = zx::BootInstant::from_nanos(100);
2084        let setup_done = zx::Event::create();
2085        let mut set_task = ctx.wake_proxy.set_and_wait(
2086            deadline.into(),
2087            fta::SetMode::NotifySetupDone(
2088                setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2089            ),
2090            "Hello".into(),
2091        );
2092
2093        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task).await, Poll::Pending);
2094
2095        let mut setup_done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2096        assert_matches!(
2097            TestExecutor::poll_until_stalled(&mut setup_done_task).await,
2098            Poll::Ready(Ok(_)),
2099            "Setup event not triggered after scheduling an alarm"
2100        );
2101
2102        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(100)).await;
2103        assert_matches!(TestExecutor::poll_until_stalled(set_task).await, Poll::Ready(Ok(Ok(_))));
2104    }
2105
2106    #[fuchsia::test(allow_stalls = false)]
2107    async fn test_basic_timed_wait_notify() {
2108        const ALARM_ID: &str = "Hello";
2109        let ctx = TestContext::new().await;
2110
2111        let (notifier_client, mut notifier_stream) =
2112            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2113        let setup_done = zx::Event::create();
2114        assert_matches!(
2115            ctx.wake_proxy
2116                .set(
2117                    notifier_client,
2118                    fidl::BootInstant::from_nanos(2),
2119                    fta::SetMode::NotifySetupDone(
2120                        setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2121                    ),
2122                    ALARM_ID,
2123                )
2124                .await,
2125            Ok(Ok(()))
2126        );
2127
2128        let mut done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2129        assert_matches!(
2130            TestExecutor::poll_until_stalled(&mut done_task).await,
2131            Poll::Ready(Ok(_)),
2132            "Setup event not triggered after scheduling an alarm"
2133        );
2134
2135        let mut next_task = notifier_stream.next();
2136        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2137
2138        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
2139        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2140
2141        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(2)).await;
2142        assert_matches!(
2143            TestExecutor::poll_until_stalled(next_task).await,
2144            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2145        );
2146    }
2147
2148    #[fuchsia::test(allow_stalls = false)]
2149    async fn test_two_alarms_same() {
2150        const DEADLINE_NANOS: i64 = 100;
2151
2152        let ctx = TestContext::new().await;
2153
2154        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2155            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2156            fta::SetMode::KeepAlive(fake_wake_lease()),
2157            "Hello1".into(),
2158        );
2159        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2160            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2161            fta::SetMode::KeepAlive(fake_wake_lease()),
2162            "Hello2".into(),
2163        );
2164
2165        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2166        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2167
2168        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2169
2170        assert_matches!(
2171            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2172            Poll::Ready(Ok(Ok(_)))
2173        );
2174        assert_matches!(
2175            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2176            Poll::Ready(Ok(Ok(_)))
2177        );
2178    }
2179
2180    #[fuchsia::test(allow_stalls = false)]
2181    async fn test_two_alarms_same_notify() {
2182        const DEADLINE_NANOS: i64 = 100;
2183        const ALARM_ID_1: &str = "Hello1";
2184        const ALARM_ID_2: &str = "Hello2";
2185
2186        let ctx = TestContext::new().await;
2187
2188        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2189            let (notifier_client, notifier_stream) =
2190                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2191            assert_matches!(
2192                ctx.wake_proxy
2193                    .set(
2194                        notifier_client,
2195                        fidl::BootInstant::from_nanos(deadline_nanos),
2196                        fta::SetMode::KeepAlive(fake_wake_lease()),
2197                        alarm_id,
2198                    )
2199                    .await,
2200                Ok(Ok(()))
2201            );
2202            notifier_stream
2203        };
2204
2205        let mut notifier_1 = schedule(DEADLINE_NANOS, ALARM_ID_1).await;
2206        let mut notifier_2 = schedule(DEADLINE_NANOS, ALARM_ID_2).await;
2207
2208        let mut next_task_1 = notifier_1.next();
2209        let mut next_task_2 = notifier_2.next();
2210
2211        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2212        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2213
2214        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2215
2216        assert_matches!(
2217            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2218            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_1
2219        );
2220        assert_matches!(
2221            TestExecutor::poll_until_stalled(&mut next_task_2).await,
2222            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_2
2223        );
2224
2225        assert_matches!(
2226            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2227            Poll::Ready(None)
2228        );
2229        assert_matches!(
2230            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2231            Poll::Ready(None)
2232        );
2233    }
2234
2235    #[test_case(100, 200 ; "push out")]
2236    #[test_case(200, 100 ; "pull in")]
2237    #[fuchsia::test(allow_stalls = false)]
2238    async fn test_two_alarms_different(
2239        // One timer scheduled at this instant (fake time starts from zero).
2240        first_deadline_nanos: i64,
2241        // Another timer scheduled at this instant.
2242        second_deadline_nanos: i64,
2243    ) {
2244        let ctx = TestContext::new().await;
2245
2246        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2247            fidl::BootInstant::from_nanos(first_deadline_nanos),
2248            fta::SetMode::KeepAlive(fake_wake_lease()),
2249            "Hello1".into(),
2250        );
2251        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2252            fidl::BootInstant::from_nanos(second_deadline_nanos),
2253            fta::SetMode::KeepAlive(fake_wake_lease()),
2254            "Hello2".into(),
2255        );
2256
2257        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2258        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2259
2260        // Sort alarms by their deadlines.
2261        let mut tasks = [(first_deadline_nanos, set_task_1), (second_deadline_nanos, set_task_2)];
2262        tasks.sort_by(|a, b| a.0.cmp(&b.0));
2263        let [mut first_task, mut second_task] = tasks;
2264
2265        // Alarms should fire in order of deadlines.
2266        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(first_task.0)).await;
2267        assert_matches!(
2268            TestExecutor::poll_until_stalled(&mut first_task.1).await,
2269            Poll::Ready(Ok(Ok(_)))
2270        );
2271        assert_matches!(TestExecutor::poll_until_stalled(&mut second_task.1).await, Poll::Pending);
2272
2273        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(second_task.0)).await;
2274        assert_matches!(
2275            TestExecutor::poll_until_stalled(&mut second_task.1).await,
2276            Poll::Ready(Ok(Ok(_)))
2277        );
2278    }
2279
2280    #[test_case(100, 200 ; "push out")]
2281    #[test_case(200, 100 ; "pull in")]
2282    #[fuchsia::test(allow_stalls = false)]
2283    async fn test_two_alarms_different_notify(
2284        // One timer scheduled at this instant (fake time starts from zero).
2285        first_deadline_nanos: i64,
2286        // Another timer scheduled at this instant.
2287        second_deadline_nanos: i64,
2288    ) {
2289        const ALARM_ID_1: &str = "Hello1";
2290        const ALARM_ID_2: &str = "Hello2";
2291
2292        let ctx = TestContext::new().await;
2293
2294        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2295            let (notifier_client, notifier_stream) =
2296                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2297            assert_matches!(
2298                ctx.wake_proxy
2299                    .set(
2300                        notifier_client,
2301                        fidl::BootInstant::from_nanos(deadline_nanos),
2302                        fta::SetMode::KeepAlive(fake_wake_lease()),
2303                        alarm_id,
2304                    )
2305                    .await,
2306                Ok(Ok(()))
2307            );
2308            notifier_stream
2309        };
2310
2311        // Sort alarms by their deadlines.
2312        let mut notifier_all = futures::stream::select_all([
2313            schedule(first_deadline_nanos, ALARM_ID_1).await,
2314            schedule(second_deadline_nanos, ALARM_ID_2).await,
2315        ]);
2316        let [(early_ns, early_alarm), (later_ns, later_alarm)] = {
2317            let mut tasks =
2318                [(first_deadline_nanos, ALARM_ID_1), (second_deadline_nanos, ALARM_ID_2)];
2319            tasks.sort_by(|a, b| a.0.cmp(&b.0));
2320            tasks
2321        };
2322
2323        // Alarms should fire in order of deadlines.
2324        let mut next_task = notifier_all.next();
2325        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2326
2327        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(early_ns)).await;
2328        assert_matches!(
2329            TestExecutor::poll_until_stalled(next_task).await,
2330            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == early_alarm
2331        );
2332
2333        let mut next_task = notifier_all.next();
2334        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2335
2336        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(later_ns)).await;
2337        assert_matches!(
2338            TestExecutor::poll_until_stalled(next_task).await,
2339            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == later_alarm
2340        );
2341        assert_matches!(
2342            TestExecutor::poll_until_stalled(notifier_all.next()).await,
2343            Poll::Ready(None)
2344        );
2345    }
2346
2347    #[fuchsia::test(allow_stalls = false)]
2348    async fn test_alarm_immediate() {
2349        let ctx = TestContext::new().await;
2350        let mut set_task = ctx.wake_proxy.set_and_wait(
2351            fidl::BootInstant::INFINITE_PAST,
2352            fta::SetMode::KeepAlive(fake_wake_lease()),
2353            "Hello1".into(),
2354        );
2355        assert_matches!(
2356            TestExecutor::poll_until_stalled(&mut set_task).await,
2357            Poll::Ready(Ok(Ok(_)))
2358        );
2359    }
2360
2361    #[fuchsia::test(allow_stalls = false)]
2362    async fn test_alarm_immediate_notify() {
2363        const ALARM_ID: &str = "Hello";
2364        let ctx = TestContext::new().await;
2365
2366        let (notifier_client, mut notifier_stream) =
2367            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2368
2369        let mut set_task = ctx.wake_proxy.set(
2370            notifier_client,
2371            fidl::BootInstant::INFINITE_PAST,
2372            fta::SetMode::KeepAlive(fake_wake_lease()),
2373            ALARM_ID,
2374        );
2375        assert_matches!(
2376            TestExecutor::poll_until_stalled(&mut set_task).await,
2377            Poll::Ready(Ok(Ok(_)))
2378        );
2379        assert_matches!(
2380            TestExecutor::poll_until_stalled(notifier_stream.next()).await,
2381            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2382        );
2383    }
2384
2385    // Rescheduling a timer will cancel the earlier call and use the new
2386    // deadline for the later call.
2387    #[test_case(200, 100 ; "pull in")]
2388    #[test_case(100, 200 ; "push out")]
2389    #[fuchsia::test(allow_stalls = false)]
2390    async fn test_reschedule(initial_deadline_nanos: i64, override_deadline_nanos: i64) {
2391        const ALARM_ID: &str = "Hello";
2392
2393        let ctx = TestContext::new().await;
2394
2395        let schedule = |deadline_nanos: i64| {
2396            let setup_done = zx::Event::create();
2397            let task = ctx.wake_proxy.set_and_wait(
2398                fidl::BootInstant::from_nanos(deadline_nanos),
2399                fta::SetMode::NotifySetupDone(
2400                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2401                ),
2402                ALARM_ID.into(),
2403            );
2404            (task, setup_done)
2405        };
2406
2407        // Schedule a timer with a long timeout first. Let it wait, then
2408        // try to reschedule the same timer.
2409        let (mut set_task_1, setup_done_1) = schedule(initial_deadline_nanos);
2410        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
2411        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2412
2413        // Schedule the same timer as above, but with a different deadline. This
2414        // should cancel the earlier call.
2415        let (mut set_task_2, setup_done_2) = schedule(override_deadline_nanos);
2416        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
2417        assert_matches!(
2418            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2419            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
2420        );
2421        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2422
2423        // The later call fires exactly at the new deadline.
2424        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos - 1))
2425            .await;
2426        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2427        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos))
2428            .await;
2429        assert_matches!(
2430            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2431            Poll::Ready(Ok(Ok(_)))
2432        );
2433
2434        // The values in the inspector tree are fixed because the test
2435        // runs fully deterministically in fake time.
2436        assert_data_tree!(finspect::component::inspector(), root: {
2437            test: {
2438                hardware: {
2439                    // All alarms fired, so this should be "none".
2440                    current_deadline: "(none)",
2441                    remaining_until_alarm: "(none)",
2442                },
2443                now_formatted: format!("{override_deadline_nanos}ns ({override_deadline_nanos})"),
2444                now_ns: override_deadline_nanos,
2445                pending_timers: "\n\t",
2446                pending_timers_count: 0u64,
2447                requested_deadlines_ns: AnyProperty,
2448                schedule_delay_ns: AnyProperty,
2449                slack_ns: AnyProperty,
2450            },
2451        });
2452    }
2453
2454    // Rescheduling a timer will send an error on the old notifier and use the
2455    // new notifier for the new deadline.
2456    #[fuchsia::test(allow_stalls = false)]
2457    async fn test_reschedule_notify() {
2458        const ALARM_ID: &str = "Hello";
2459        const INITIAL_DEADLINE_NANOS: i64 = 100;
2460        const OVERRIDE_DEADLINE_NANOS: i64 = 200;
2461
2462        let ctx = TestContext::new().await;
2463
2464        let schedule = async |deadline_nanos: i64| {
2465            let (notifier_client, notifier_stream) =
2466                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2467            assert_matches!(
2468                ctx.wake_proxy
2469                    .set(
2470                        notifier_client,
2471                        fidl::BootInstant::from_nanos(deadline_nanos),
2472                        fta::SetMode::KeepAlive(fake_wake_lease()),
2473                        ALARM_ID.into(),
2474                    )
2475                    .await,
2476                Ok(Ok(()))
2477            );
2478            notifier_stream
2479        };
2480
2481        let mut notifier_1 = schedule(INITIAL_DEADLINE_NANOS).await;
2482        let mut next_task_1 = notifier_1.next();
2483        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2484
2485        let mut notifier_2 = schedule(OVERRIDE_DEADLINE_NANOS).await;
2486        let mut next_task_2 = notifier_2.next();
2487        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2488
2489        // First notifier is called with an error then closed.
2490        assert_matches!(
2491            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2492            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
2493        );
2494        assert_matches!(
2495            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2496            Poll::Ready(None)
2497        );
2498
2499        // Second notifier is called upon the new deadline then closed.
2500        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(OVERRIDE_DEADLINE_NANOS))
2501            .await;
2502        assert_matches!(
2503            TestExecutor::poll_until_stalled(next_task_2).await,
2504            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2505        );
2506        assert_matches!(
2507            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2508            Poll::Ready(None)
2509        );
2510    }
2511
2512    // Rescheduling a timer with the same deadline using SetAndWait will fail
2513    // with WakeAlarmsError::Dropped.
2514    //
2515    // TODO(http://b/430638703): Consider changing this behavior.
2516    #[fuchsia::test(allow_stalls = false)]
2517    async fn test_reschedule_same() {
2518        const ALARM_ID: &str = "Hello";
2519        const DEADLINE_NANOS: i64 = 100;
2520
2521        let ctx = TestContext::new().await;
2522
2523        let schedule = |deadline_nanos: i64| {
2524            let setup_done = zx::Event::create();
2525            let task = ctx.wake_proxy.set_and_wait(
2526                fidl::BootInstant::from_nanos(deadline_nanos),
2527                fta::SetMode::NotifySetupDone(
2528                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2529                ),
2530                ALARM_ID.into(),
2531            );
2532            (task, setup_done)
2533        };
2534
2535        // Schedule a timer with a long timeout first. Let it wait, then
2536        // try to reschedule the same timer.
2537        let (mut set_task_1, setup_done_1) = schedule(DEADLINE_NANOS);
2538        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
2539        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2540
2541        // Schedule the same timer again with the same deadline. The later call
2542        // is rejected with Dropped, while the earlier call stays scheduled.
2543        let (mut set_task_2, setup_done_2) = schedule(DEADLINE_NANOS);
2544        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
2545        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2546        assert_matches!(
2547            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2548            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
2549        );
2550
2551        // The earlier call fires exactly at the original deadline.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_1).await,
            Poll::Ready(Ok(Ok(_)))
        );
    }

    // Rescheduling a timer with the same deadline using Set notifies the new
    // notifier with WakeAlarmsError::Dropped; the original notifier still fires.
    //
    // TODO(http://b/430638703): Consider changing this behavior.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule_same_notify() {
        const ALARM_ID: &str = "Hello";
        const DEADLINE_NANOS: i64 = 100;

        let ctx = TestContext::new().await;

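        // Helper: sets an alarm via `Set` with a fresh notifier and returns the
        // notifier's request stream for observing callbacks.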
        let schedule = async |deadline_nanos: i64| {
            let (notifier_client, notifier_stream) =
                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
            assert_matches!(
                ctx.wake_proxy
                    .set(
                        notifier_client,
                        fidl::BootInstant::from_nanos(deadline_nanos),
                        fta::SetMode::KeepAlive(fake_wake_lease()),
                        ALARM_ID.into(),
                    )
                    .await,
                Ok(Ok(()))
            );
            notifier_stream
        };

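        // Schedule the alarm once; its notifier stays pending until the deadline.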
        let mut notifier_1 = schedule(DEADLINE_NANOS).await;
        let mut next_task_1 = notifier_1.next();
        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);

        // Second notifier will error immediately then close the connection.
        let mut notifier_2 = schedule(DEADLINE_NANOS).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_2.next()).await,
            Poll::Ready(None)
        );

        // First notifier is called upon the deadline then closed.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
        assert_matches!(
            TestExecutor::poll_until_stalled(next_task_1).await,
            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
        );
        assert_matches!(
            TestExecutor::poll_until_stalled(notifier_1.next()).await,
            Poll::Ready(None)
        );
    }

    // If we get two scheduling FIDL errors one after another, the wake alarm
    // manager must not lock up.
    #[fuchsia::test(allow_stalls = false)]
    async fn test_fidl_error_on_reschedule() {
        const DEADLINE_NANOS: i64 = 100;

        let (wake_proxy, _stream) =
            fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
        drop(_stream);

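        // With the server end dropped, each scheduling call fails with
        // `ClientChannelClosed` rather than blocking.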
        assert_matches!(
            wake_proxy
                .set_and_wait(
                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
                    fta::SetMode::KeepAlive(fake_wake_lease()),
                    "hello1".into(),
                )
                .await,
            Err(fidl::Error::ClientChannelClosed { .. })
        );

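        // A second attempt on the same closed proxy must also fail cleanly.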
        assert_matches!(
            wake_proxy
                .set_and_wait(
                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
                    fta::SetMode::KeepAlive(fake_wake_lease()),
                    "hello2".into(),
                )
                .await,
            Err(fidl::Error::ClientChannelClosed { .. })
        );
    }
}