alarms/
lib.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Alarm management subsystem.
6//!
//! This subsystem serves the FIDL API `fuchsia.time.alarms/WakeAlarms`. To
//! instantiate, you can use the following approach:
9//!
10//! ```ignore
//! let proxy = client::connect_to_protocol::<ffhh::DeviceMarker>().map_err(
//!     |e| error!("error: {}", e)).expect("add proper error handling");
//! // `scope`, `inspect_node` and `utc_clock` are obtained during component setup.
//! let timer_loop = Rc::new(alarms::Loop::new(scope, proxy, inspect_node, utc_clock));
//! ```
15//!
16//! From here, use the standard approach with [ServiceFs::new] to expose the
17//! discoverable FIDL endpoint and call:
18//!
19//! ```ignore
//! let stream: fidl_fuchsia_time_alarms::WakeAlarmsRequestStream = ... ;
21//! alarms::serve(timer_loop, stream).await;
22//! // ...
23//! ```
24//!
25//! Of course, for everything to work well, your component will need appropriate
26//! capability routing.  Refer to capability routing docs for those details.
27
28mod emu;
29mod timers;
30
31use crate::emu::EmulationTimerOps;
32use anyhow::{Context, Result, anyhow};
33use async_trait::async_trait;
34use fidl::HandleBased;
35use fidl::encoding::ProxyChannelBox;
36use fidl::endpoints::RequestStream;
37use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
38use fuchsia_inspect::{IntProperty, NumericProperty, Property};
39use futures::channel::mpsc;
40use futures::sink::SinkExt;
41use futures::{StreamExt, TryStreamExt};
42use log::{debug, error, warn};
43use scopeguard::defer;
44use std::cell::RefCell;
45use std::rc::Rc;
46use std::sync::LazyLock;
47use time_pretty::{MSEC_IN_NANOS, format_duration, format_timer};
48use zx::AsHandleRef;
49use {
50    fidl_fuchsia_hardware_hrtimer as ffhh, fidl_fuchsia_time_alarms as fta,
51    fuchsia_async as fasync, fuchsia_inspect as finspect, fuchsia_runtime as fxr,
52    fuchsia_trace as trace,
53};
54
55static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
56static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));
57
58/// The largest value of timer "ticks" that is still considered useful.
59static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);
60
/// The smallest value of "ticks" that we can program into the driver: the driver
/// rejects "0" ticks, even though it probably shouldn't. See b/437177931 for
/// details.
64static MIN_USEFUL_TICKS: u64 = 1;
65
66/// The hrtimer ID used for scheduling wake alarms.  This ID is reused from
67/// Starnix, and should eventually no longer be critical.
68const MAIN_TIMER_ID: usize = 6;
69
70/// This is what we consider a "long" delay in alarm operations.
71const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;
72
/// A macro that awaits a future; if the future stays pending for longer than
/// 30 seconds, it logs a warning message (and requests a backtrace of all
/// threads), repeating the warning every 30 seconds until the future completes.
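///
/// A minimal usage sketch (illustrative; this mirrors how the macro is used
/// elsewhere in this file with the hrtimer proxy):
///
/// ```ignore
/// let properties = log_long_op!(proxy.get_properties());
/// ```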
75macro_rules! log_long_op {
76    ($fut:expr) => {{
77        use futures::FutureExt;
78        let fut = $fut;
79        futures::pin_mut!(fut);
80        loop {
81            let timeout = fasync::Timer::new(std::time::Duration::from_secs(30));
82            futures::select! {
83                res = fut.as_mut().fuse() => {
84                    break res;
85                }
86                _ = timeout.fuse() => {
87                    warn!("unexpected blocking: long-running async operation at {}:{}", file!(), line!());
88                    #[cfg(all(target_os = "fuchsia", not(doc)))]
89                    ::debug::backtrace_request_all_threads();
90                }
91            }
92        }
93    }};
94}
95
/// Increments an underlying inspect property on creation and decrements it when dropped.
97struct ScopedInc<'a> {
98    property: &'a IntProperty,
99}
100
101impl<'a> ScopedInc<'a> {
102    fn new(property: &'a IntProperty) -> Self {
103        property.add(1);
104        Self { property }
105    }
106}
107
108impl<'a> Drop for ScopedInc<'a> {
109    fn drop(&mut self) {
110        self.property.add(-1);
111    }
112}
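
// Illustrative use (a sketch mirroring the command loop below): the wrapped
// inspect property counts how many executions of a scope are currently in flight.
//
//   let _in_flight = ScopedInc::new(&loop_count);
//   // ... do work; the property is decremented when `_in_flight` is dropped.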
113
/// Compares two optional deadlines and returns true if `before` is different from `after`.
/// `None`s compare as equal.
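///
/// For example (a sketch):
///
/// ```ignore
/// let t = fasync::BootInstant::now();
/// assert!(!is_deadline_changed(None, None));
/// assert!(is_deadline_changed(None, Some(t)));
/// assert!(!is_deadline_changed(Some(t), Some(t)));
/// ```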
116fn is_deadline_changed(
117    before: Option<fasync::BootInstant>,
118    after: Option<fasync::BootInstant>,
119) -> bool {
120    match (before, after) {
121        (None, None) => false,
122        (None, Some(_)) | (Some(_), None) => true,
123        (Some(before), Some(after)) => before != after,
124    }
125}
126
/// Errors returnable from [TimerOps] calls.
128#[derive(Debug)]
129pub(crate) enum TimerOpsError {
130    /// The driver reported an error.
131    Driver(ffhh::DriverError),
132    /// FIDL-specific RPC error.
133    Fidl(fidl::Error),
134}
135
136trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {
137    // nop
138}
139
140/// Abstracts away timer operations.
141#[async_trait(?Send)]
142pub(crate) trait TimerOps {
143    /// Stop the timer with the specified ID.
144    async fn stop(&self, id: u64);
145
146    /// Examine the timer's properties, such as supported resolutions and tick
147    /// counts.
148    async fn get_timer_properties(&self) -> TimerConfig;
149
    /// Starts the timer with the given `id`, `resolution` and `ticks`, and returns a
    /// future that resolves once the timer fires.
    ///
    /// This method must return an actual future to satisfy the borrow checker:
    /// making it `async` would keep `self` borrowed, which would thwart attempts
    /// to move the return value of this call into a separate closure.
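    ///
    /// Illustrative use (a sketch): the returned boxed future can be moved into a
    /// separately spawned task, which an `async fn` borrowing `self` would not allow.
    ///
    /// ```ignore
    /// let setup_event = zx::Event::create();
    /// let wait_fut = timer_ops.start_and_wait(id, &resolution, ticks, setup_event);
    /// let keep_alive = wait_fut.await;
    /// ```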
154    fn start_and_wait(
155        &self,
156        id: u64,
157        resolution: &ffhh::Resolution,
158        ticks: u64,
159        setup_event: zx::Event,
160    ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
161}
162
163/// TimerOps backed by an actual hardware timer.
164struct HardwareTimerOps {
165    proxy: ffhh::DeviceProxy,
166}
167
168impl HardwareTimerOps {
169    fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
170        Box::new(Self { proxy })
171    }
172}
173
174#[async_trait(?Send)]
175impl TimerOps for HardwareTimerOps {
176    async fn stop(&self, id: u64) {
177        let _ = self
178            .proxy
179            .stop(id)
180            .await
181            .map(|result| {
182                let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
183            })
184            .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
185    }
186
187    async fn get_timer_properties(&self) -> TimerConfig {
188        match log_long_op!(self.proxy.get_properties()) {
189            Ok(p) => {
190                let timers_properties = &p.timers_properties.expect("timers_properties must exist");
191                debug!("get_timer_properties: got: {:?}", timers_properties);
192
193                // Pick the correct hrtimer to use for wakes.
194                let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
195                    // Mostly vim3, where we have pre-existing timer allocations
196                    // that we don't need to change.
197                    MAIN_TIMER_ID
198                } else if timers_properties.len() > 0 {
199                    // Newer devices that don't need to allocate timer IDs, and/or
200                    // may not even have as many timers as vim3 does. But, at least
201                    // one timer is needed.
202                    0
203                } else {
204                    // Give up.
205                    return TimerConfig::new_empty();
206                };
207                let main_timer_properties = &timers_properties[timer_index];
208                debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
209                // Not sure whether it is useful to have more ticks than this, so limit it.
210                let max_ticks: u64 = std::cmp::min(
211                    main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
212                    *MAX_USEFUL_TICKS,
213                );
214                let resolutions = &main_timer_properties
215                    .supported_resolutions
216                    .as_ref()
217                    .expect("supported_resolutions is populated")
218                    .iter()
219                    .last() //  Limits the resolution to the coarsest available.
220                    .map(|r| match *r {
221                        ffhh::Resolution::Duration(d) => d,
222                        _ => {
223                            error!(
224                            "get_timer_properties: Unknown resolution type, returning millisecond."
225                        );
226                            MSEC_IN_NANOS
227                        }
228                    })
229                    .map(|d| zx::BootDuration::from_nanos(d))
230                    .into_iter() // Used with .last() above.
231                    .collect::<Vec<_>>();
232                let timer_id = main_timer_properties.id.expect("timer ID is always present");
233                TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
234            }
235            Err(e) => {
236                error!("could not get timer properties: {:?}", e);
237                TimerConfig::new_empty()
238            }
239        }
240    }
241
242    fn start_and_wait(
243        &self,
244        id: u64,
245        resolution: &ffhh::Resolution,
246        ticks: u64,
247        setup_event: zx::Event,
248    ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
249        let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
250        Box::pin(HwResponseFut { pinner: Box::pin(inner) })
251    }
252}
253
254// Untangles the borrow checker issues that otherwise result from making
255// TimerOps::start_and_wait an async function.
256struct HwResponseFut {
257    pinner: std::pin::Pin<
258        Box<
259            fidl::client::QueryResponseFut<
260                ffhh::DeviceStartAndWaitResult,
261                fidl::encoding::DefaultFuchsiaResourceDialect,
262            >,
263        >,
264    >,
265}
266
267use std::task::Poll;
268impl SawResponseFut for HwResponseFut {}
269impl std::future::Future for HwResponseFut {
270    type Output = Result<zx::EventPair, TimerOpsError>;
271    fn poll(
272        mut self: std::pin::Pin<&mut Self>,
273        cx: &mut std::task::Context<'_>,
274    ) -> std::task::Poll<Self::Output> {
275        let inner_poll = self.pinner.as_mut().poll(cx);
276        match inner_poll {
277            Poll::Ready(result) => Poll::Ready(match result {
278                Ok(Ok(keep_alive)) => Ok(keep_alive),
279                Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
280                Err(e) => Err(TimerOpsError::Fidl(e)),
281            }),
282            Poll::Pending => Poll::Pending,
283        }
284    }
285}
286
287/// Stops a currently running hardware timer.
288async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
289    trace::duration!(c"alarms", c"hrtimer:stop", "id" => timer_config.id);
290    debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
291    hrtimer.stop(timer_config.id).await;
292    debug!("stop_hrtimer: stopped  hardware timer: {}", timer_config.id);
293}
294
295// The default size of the channels created in this module.
296// This is very unlikely to create bottlenecks.
297const CHANNEL_SIZE: usize = 1000;
298
299/// A type handed around between the concurrent loops run by this module.
300#[derive(Debug)]
301enum Cmd {
302    /// Request a timer to be started.
303    Start {
304        /// The unique connection ID.
305        conn_id: zx::Koid,
306        /// A timestamp (presumably in the future), at which to expire the timer.
307        deadline: timers::Deadline,
        // The API supports several modes. See fuchsia.time.alarms/Wake.fidl.
        //
        // Optional, because not always needed:
        //
        // * `mode` is required for hanging get API calls (e.g. `SetAndWait`), as we
        //   must signal when the alarm has been scheduled.
        // * Calls such as `Set`, which return as soon as the alarm is scheduled, do
        //   not need a `mode`: the caller simply waits for the call to return.
316        mode: Option<fta::SetMode>,
317        /// An alarm identifier, chosen by the caller.
318        alarm_id: String,
319        /// A responder that will be called when the timer expires. The
320        /// client end of the connection will block until we send something
321        /// on this responder.
322        ///
        /// This is packaged into an Rc only because either the "happy path" or
        /// the error path must consume the responder, but never both.  This allows
        /// it to be consumed without the responder needing to implement Default.
326        responder: Rc<dyn timers::Responder>,
327    },
328    StopById {
329        done: zx::Event,
330        timer_id: timers::Id,
331    },
332    Alarm {
333        expired_deadline: fasync::BootInstant,
334        keep_alive: fidl::EventPair,
335    },
336    AlarmFidlError {
337        expired_deadline: fasync::BootInstant,
338        error: fidl::Error,
339    },
340    AlarmDriverError {
341        expired_deadline: fasync::BootInstant,
342        error: ffhh::DriverError,
343
344        // Added these for debugging details, otherwise not necessary.
345        timer_config_id: u64,
346        resolution_nanos: i64,
347        ticks: u64,
348    },
349    /// The UTC clock transformation has been updated.
350    UtcUpdated {
351        // The new boot-to-utc clock transformation.
352        transform: fxr::UtcClockTransform,
353    },
354}
355
356impl std::fmt::Display for Cmd {
357    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
358        match self {
359            Cmd::Start { conn_id, deadline, alarm_id, .. } => {
360                write!(
361                    f,
362                    "Start[alarm_id=\"{}\", conn_id={:?}, deadline={}]",
363                    alarm_id, conn_id, deadline,
364                )
365            }
366            Cmd::Alarm { expired_deadline, .. } => {
367                write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
368            }
369            Cmd::AlarmFidlError { expired_deadline, error } => {
370                write!(
371                    f,
372                    "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
373                    format_timer((*expired_deadline).into()),
374                    error
375                )
376            }
377            Cmd::AlarmDriverError { expired_deadline, error, .. } => {
378                write!(
379                    f,
380                    "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
381                    format_timer((*expired_deadline).into()),
382                    error
383                )
384            }
385            Cmd::StopById { timer_id, done: _ } => {
386                write!(f, "StopById[timerId={}]", timer_id,)
387            }
388            Cmd::UtcUpdated { transform } => {
                write!(f, "UtcUpdated[transform={transform:?}]")
390            }
391        }
392    }
393}
394
395/// Extracts a KOID from the underlying channel of the provided stream.
396///
397/// This function deconstructs the provided stream to access the underlying
398/// channel and extract its KOID. It then reconstructs the stream and returns
399/// it to the caller along with the KOID.
400///
401/// # Args
402/// - `stream`: The `fta::WakeAlarmsRequestStream` to extract the KOID from.
403///
404/// # Returns
405/// A tuple containing the `zx::Koid` of the stream's channel and the
406/// reconstructed `fta::WakeAlarmsRequestStream`.
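///
/// # Example (illustrative)
///
/// ```ignore
/// let (conn_id, requests) = get_stream_koid(requests);
/// debug!("opened connection: {:?}", conn_id);
/// ```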
407pub fn get_stream_koid(
408    stream: fta::WakeAlarmsRequestStream,
409) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
410    let (inner, is_terminated) = stream.into_inner();
411    let koid = inner.channel().as_channel().get_koid().expect("infallible");
412    let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
413    (koid, stream)
414}
415
416/// Serves a single Wake API client.
417///
418/// This function processes incoming requests from a `fta::WakeAlarmsRequestStream`,
419/// handling each request by calling `handle_request`. It continues to process
420/// requests until the stream is exhausted.
421///
422/// # Args
423/// - `timer_loop`: A reference-counted pointer to the `Loop` that manages timers.
424/// - `requests`: The stream of incoming `fta::WakeAlarmsRequest` from a client.
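///
/// # Example (illustrative; see also the module-level docs)
///
/// ```ignore
/// let stream: fidl_fuchsia_time_alarms::WakeAlarmsRequestStream = /* from ServiceFs */;
/// alarms::serve(timer_loop.clone(), stream).await;
/// ```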
425pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
426    let timer_loop = timer_loop.clone();
427    let timer_loop_send = || timer_loop.get_sender();
428    let (conn_id, mut requests) = get_stream_koid(requests);
429    let mut request_count = 0;
430    debug!("alarms::serve: opened connection: {:?}", conn_id);
431    while let Some(maybe_request) = requests.next().await {
432        request_count += 1;
433        debug!("alarms::serve: conn_id: {:?} incoming request: {}", conn_id, request_count);
434        match maybe_request {
435            Ok(request) => {
436                // Should return quickly.
437                handle_request(conn_id, timer_loop_send(), request).await;
438            }
439            Err(e) => {
440                warn!("alarms::serve: error in request: {:?}", e);
441            }
442        }
443        debug!("alarms::serve: conn_id: {:?} done request: {}", conn_id, request_count);
444    }
445    // Check if connection closure was intentional. It is way too easy to close
446    // a FIDL connection inadvertently if doing non-mainstream things with FIDL.
447    warn!("alarms::serve: CLOSED CONNECTION: conn_id: {:?}", conn_id);
448}
449
450async fn handle_cancel(alarm_id: String, conn_id: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
451    let done = zx::Event::create();
452    let timer_id = timers::Id::new(alarm_id.clone(), conn_id);
453    if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
454        warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
455    }
456    wait_signaled(&done).await;
457}
458
459/// Processes a single Wake API request from a single client.
460/// This function is expected to return quickly.
461///
462/// # Args
463/// - `conn_id`: the unique identifier of the connection producing these requests.
464/// - `cmd`: the outbound queue of commands to deliver to the timer manager.
465/// - `request`: a single inbound Wake FIDL API request.
466async fn handle_request(
467    conn_id: zx::Koid,
468    mut cmd: mpsc::Sender<Cmd>,
469    request: fta::WakeAlarmsRequest,
470) {
471    match request {
472        fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
            // Since the responder is consumed by either the happy path or the error
            // path, but never both, and because the responder does not implement
            // Default, this is a way to send it in two mutually exclusive directions.
            // Each direction will reverse this wrapping once the responder makes it
            // to the other side.
477            //
478            // Rc required because of sharing a noncopyable struct; RefCell required because
479            // borrow_mut() is needed to move out; and Option is required so we can
480            // use take() to replace the struct with None so it does not need to leave
481            // a Default in its place.
482            let responder = Rc::new(RefCell::new(Some(responder)));
483
484            // Alarm is not scheduled yet!
485            debug!(
486                "handle_request: scheduling alarm_id: \"{}\"\n\tconn_id: {:?}\n\tdeadline: {}",
487                alarm_id,
488                conn_id,
489                format_timer(deadline.into())
490            );
491            // Expected to return quickly.
492            let deadline = timers::Deadline::Boot(deadline.into());
493            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
494                conn_id,
495                deadline,
496                mode: Some(mode),
497                alarm_id: alarm_id.clone(),
498                responder: responder.clone(),
499            })) {
500                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
501                responder
502                    .borrow_mut()
503                    .take()
504                    .expect("always present if call fails")
505                    .send(Err(fta::WakeAlarmsError::Internal))
506                    .unwrap();
507            }
508        }
509        fta::WakeAlarmsRequest::SetAndWaitUtc { deadline, mode, alarm_id, responder } => {
510            // Quickly get rid of the custom wake alarms deadline type.
511            let deadline =
512                timers::Deadline::Utc(fxr::UtcInstant::from_nanos(deadline.timestamp_utc));
513
514            // The rest of this match branch is the same as for `SetAndWait`. However, the handling
515            // is for now simple enough that we don't need to explore factoring common actions out.
516            let responder = Rc::new(RefCell::new(Some(responder)));
517            debug!(
518                "handle_request: scheduling alarm_id UTC: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {deadline}",
519            );
520
521            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
522                conn_id,
523                deadline,
524                mode: Some(mode),
525                alarm_id: alarm_id.clone(),
526                responder: responder.clone(),
527            })) {
528                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
529                responder
530                    .borrow_mut()
531                    .take()
532                    .expect("always present if call fails")
533                    .send(Err(fta::WakeAlarmsError::Internal))
534                    .unwrap();
535            }
536        }
537        fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
538            // TODO: b/383062441 - make this into an async task so that we wait
539            // less to schedule the next alarm.
540            log_long_op!(handle_cancel(alarm_id, conn_id, &mut cmd));
541        }
542        fta::WakeAlarmsRequest::Set { notifier, deadline, mode, alarm_id, responder } => {
543            // Alarm is not scheduled yet!
544            debug!(
545                "handle_request: scheduling alarm_id: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {}",
546                format_timer(deadline.into())
547            );
548            // Expected to return quickly.
549            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
550                conn_id,
551                deadline: timers::Deadline::Boot(deadline.into()),
552                mode: Some(mode),
553                alarm_id: alarm_id.clone(),
554                responder: Rc::new(RefCell::new(Some(notifier))),
555            })) {
556                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
557                responder.send(Err(fta::WakeAlarmsError::Internal)).unwrap();
558            } else {
559                // Successfully scheduled the alarm.
560                responder.send(Ok(())).unwrap();
561            }
562        }
563        fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
564    };
565}
566
567/// Represents a single alarm event processing loop.
568///
/// One instance is created for each alarm-capable low-level device. The `Loop`
570/// is responsible for managing the lifecycle of wake alarms, including their
571/// creation, scheduling, and cancellation. It interacts with the underlying
572/// hardware timer through a `TimerOps` trait object.
573pub struct Loop {
    // Given to any clients that need to send messages to the spawned timer loop
    // via [get_sender].
576    snd: mpsc::Sender<Cmd>,
577}
578
579impl Loop {
580    /// Creates a new instance of `Loop`.
581    ///
582    /// This function initializes a new `Loop` with a connection to a low-level
583    /// hardware timer device. It spawns two background tasks: one for the main
584    /// timer event loop and another for monitoring UTC clock changes.
585    ///
586    /// # Args
587    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
588    /// - `device_proxy`: A `ffhh::DeviceProxy` for communicating with the hardware timer.
589    /// - `inspect`: A `finspect::Node` for recording diagnostics.
590    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
591    ///
592    /// # Returns
593    /// A new instance of `Loop`.
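    ///
    /// # Example (illustrative; `scope`, `proxy`, `inspect_node` and `utc_clock`
    /// are assumed to have been obtained elsewhere)
    ///
    /// ```ignore
    /// let timer_loop = Rc::new(Loop::new(scope, proxy, inspect_node, utc_clock));
    /// ```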
594    pub fn new(
595        scope: fasync::ScopeHandle,
596        device_proxy: ffhh::DeviceProxy,
597        inspect: finspect::Node,
598        utc_clock: fxr::UtcClock,
599    ) -> Self {
600        let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
601        Loop::new_internal(scope, hw_device_timer_ops, inspect, utc_clock)
602    }
603
604    /// Creates a new instance of `Loop` with emulated wake alarms.
605    ///
606    /// This function is similar to `new`, but it uses an emulated timer instead
607    /// of a real hardware timer. This is useful for testing environments where
608    /// a hardware timer may not be available.
609    ///
610    /// # Args
611    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
612    /// - `inspect`: A `finspect::Node` for recording diagnostics.
613    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
614    ///
615    /// # Returns
616    /// A new instance of `Loop` with an emulated timer.
617    pub fn new_emulated(
618        scope: fasync::ScopeHandle,
619        inspect: finspect::Node,
620        utc_clock: fxr::UtcClock,
621    ) -> Self {
622        let timer_ops = Box::new(EmulationTimerOps::new());
623        Loop::new_internal(scope, timer_ops, inspect, utc_clock)
624    }
625
626    fn new_internal(
627        scope: fasync::ScopeHandle,
628        timer_ops: Box<dyn TimerOps>,
629        inspect: finspect::Node,
630        utc_clock: fxr::UtcClock,
631    ) -> Self {
632        let utc_transform = Rc::new(RefCell::new(
633            utc_clock.get_details().expect("has UTC clock READ capability").reference_to_synthetic,
634        ));
635
636        let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
637        let loop_scope = scope.clone();
638
639        scope.spawn_local(wake_timer_loop(
640            loop_scope,
641            snd.clone(),
642            rcv,
643            timer_ops,
644            inspect,
645            utc_transform,
646        ));
647        scope.spawn_local(monitor_utc_clock_changes(utc_clock, snd.clone()));
648        Self { snd }
649    }
650
651    /// Gets a copy of a channel through which async commands may be sent to
652    /// the [Loop].
653    fn get_sender(&self) -> mpsc::Sender<Cmd> {
654        self.snd.clone()
655    }
656}
657
// Forwards the clock transformation of an updated UTC clock into the alarm manager,
// to allow correcting the boot-time deadlines of alarms set on the UTC timeline.
660async fn monitor_utc_clock_changes(utc_clock: fxr::UtcClock, mut cmd: mpsc::Sender<Cmd>) {
661    let koid = utc_clock.as_handle_ref().get_koid();
662    log::info!("monitor_utc_clock_changes: entry");
663    loop {
664        // CLOCK_UPDATED signal is self-clearing.
665        fasync::OnSignals::new(utc_clock.as_handle_ref(), zx::Signals::CLOCK_UPDATED)
666            .await
667            .expect("UTC clock is readable");
668
669        let transform =
670            utc_clock.get_details().expect("UTC clock details are readable").reference_to_synthetic;
671        log::debug!("Received a UTC update: koid={koid:?}: {transform:?}");
672        if let Err(err) = cmd.send(Cmd::UtcUpdated { transform }).await {
673            // This is OK in tests.
674            log::warn!("monitor_utc_clock_changes: exit: {err:?}");
675            break;
676        }
677    }
678}
679
680/// Clones a handle infallibly with `zx::Rights::SAME_RIGHTS`.
681///
682/// This function duplicates a handle, preserving its rights. It will panic if
683/// the handle duplication fails, which is not expected to happen under normal
684/// circumstances.
685///
686/// # Args
687/// - `handle`: A reference to a handle-based object to be cloned.
688///
689/// # Returns
690/// A new handle with the same rights as the original.
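///
/// # Example (illustrative)
///
/// ```ignore
/// let done = zx::Event::create();
/// let done_copy = clone_handle(&done); // refers to the same kernel object
/// ```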
691pub fn clone_handle<H: HandleBased>(handle: &H) -> H {
692    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
693}
694
695async fn wait_signaled<H: HandleBased>(handle: &H) {
696    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
697}
698
699pub(crate) fn signal<H: HandleBased>(handle: &H) {
700    handle.signal_handle(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
701}
702
703/// A [TimerDuration] represents a duration of time that can be expressed by
704/// a discrete timer register.
705///
706/// This is a low-level representation of time duration, used in interaction with
707/// hardware devices. It is therefore necessarily discretized, with adaptive
708/// resolution, depending on the physical characteristics of the underlying
709/// hardware timer that it models.
710#[derive(Debug, Clone, Copy)]
711struct TimerDuration {
712    // The resolution of each one of the `ticks` below.
713    resolution: zx::BootDuration,
714    // The number of ticks that encodes time duration. Each "tick" represents
715    // one unit of `resolution` above.
716    ticks: u64,
717}
718
719/// This and the comparison traits below are used to allow TimerDuration
720/// calculations in a compact form.
721impl Eq for TimerDuration {}
722
723impl std::cmp::PartialOrd for TimerDuration {
724    fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
725        Some(self.cmp(other))
726    }
727}
728
729impl std::cmp::PartialEq for TimerDuration {
730    fn eq(&self, other: &Self) -> bool {
731        self.cmp(other) == std::cmp::Ordering::Equal
732    }
733}
734
735impl std::cmp::Ord for TimerDuration {
736    /// Two [TimerDuration]s compare equal if they model exactly the same duration of time,
737    /// no matter the resolutions.
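    ///
    /// For example (a sketch using the constructors defined below), 10,000 ticks
    /// at 1µs resolution and 1 tick at 10ms resolution model the same 10ms span
    /// and therefore compare equal:
    ///
    /// ```ignore
    /// let a = TimerDuration::new(zx::BootDuration::from_micros(1), 10_000);
    /// let b = TimerDuration::new(zx::BootDuration::from_millis(10), 1);
    /// assert_eq!(a, b);
    /// ```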
738    fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
739        let self_ticks_128: i128 = self.ticks as i128;
740        let self_resolution: i128 = self.resolution_as_nanos() as i128;
741        let self_nanos = self_resolution * self_ticks_128;
742
743        let other_ticks_128: i128 = other.ticks as i128;
744        let other_resolution: i128 = other.resolution_as_nanos() as i128;
745        let other_nanos = other_resolution * other_ticks_128;
746
747        self_nanos.cmp(&other_nanos)
748    }
749}
750
751impl std::fmt::Display for TimerDuration {
    /// The human-readable form of a TimerDuration exposes both the tick count and the
    /// resolution, in the format "ticks x resolution", e.g. `10x5ms`.
755    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
756        let ticks = self.ticks;
757        let resolution = self.resolution();
758        // Example: 10x1ms
759        write!(f, "{}x{}", ticks, format_duration(resolution),)
760    }
761}
762
763impl TimerDuration {
764    /// The maximum representable TimerDuration that we allow.
765    fn max() -> Self {
766        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
767    }
768
769    /// The zero [TimerDuration].
770    fn zero() -> Self {
771        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
772    }
773
774    /// Creates a new timer duration with the given parameters.
775    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
776        Self { resolution, ticks }
777    }
778
779    /// Creates a new timer duration using the resolution from `res_source` and
780    /// a specified number of ticks.
781    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
782        Self::new(res_source.resolution, ticks)
783    }
784
785    /// Returns the time duration represented by this TimerDuration.
786    ///
787    /// Due to the way duration is expressed, the same time duration
788    /// can be represented in multiple ways.
789    fn duration(&self) -> zx::BootDuration {
790        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
791        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
792        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
793    }
794
795    /// The resolution of this TimerDuration
796    fn resolution(&self) -> zx::BootDuration {
797        self.resolution
798    }
799
800    fn resolution_as_nanos(&self) -> u64 {
801        self.resolution().into_nanos().try_into().expect("resolution is never negative")
802    }
803
804    /// The number of ticks of this [TimerDuration].
805    fn ticks(&self) -> u64 {
806        self.ticks
807    }
808}
809
810impl From<zx::BootDuration> for TimerDuration {
811    fn from(d: zx::BootDuration) -> TimerDuration {
812        let nanos = d.into_nanos();
813        assert!(nanos >= 0);
814        let nanos_u64 = nanos.try_into().expect("guarded by assert");
815        TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
816    }
817}
818
819impl std::ops::Div for TimerDuration {
820    type Output = u64;
821    fn div(self, rhs: Self) -> Self::Output {
822        let self_nanos = self.resolution_as_nanos() * self.ticks;
823        let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
824        self_nanos / rhs_nanos
825    }
826}
827
828impl std::ops::Mul<u64> for TimerDuration {
829    type Output = Self;
830    fn mul(self, rhs: u64) -> Self::Output {
831        Self::new(self.resolution, self.ticks * rhs)
832    }
833}
834
835/// Contains the configuration of a specific timer.
836#[derive(Debug)]
837pub(crate) struct TimerConfig {
838    /// The resolutions supported by this timer. Each entry is one possible
    /// duration for one timer "tick".  The resolution is picked when a timer
840    /// request is sent.
841    ///
842    /// The resolutions MUST be sorted from finest (index 0) to coarsest.
843    ///
844    /// There MUST be at least one resolution.
845    resolutions: Vec<zx::BootDuration>,
846    /// The maximum count of "ticks" that the timer supports. The timer usually
847    /// has a register that counts up or down based on a clock signal with
848    /// the period specified by `resolutions`.  This is the maximum value that
849    /// the counter can count to without overflowing.
850    max_ticks: u64,
851    /// The stable ID of the timer with the above configuration.
852    id: u64,
853}
854
855impl TimerConfig {
856    /// Creates a new timer config with supported timer resolutions and the max
857    /// ticks value for the timer's counter.
858    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
859        debug!(
860            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
861            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
862            max_ticks,
863            timer_id
864        );
865        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
866        TimerConfig { resolutions, max_ticks, id: timer_id }
867    }
868
869    fn new_empty() -> Self {
870        error!("TimerConfig::new_empty() called, this is not OK.");
871        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
872    }
873
    // Picks the most appropriate timer setting so that the timer fires as close as
    // possible to the moment `duration` expires.
876    //
877    // If duration is too far in the future for what the timer supports,
878    // return a smaller value, to allow the timer to be reprogrammed multiple
879    // times.
880    //
881    // If the available menu of resolutions is such that we can wake only after
882    // the intended deadline, begrudgingly return that option.
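    //
    // A worked example (illustrative values): with resolutions of [1ms, 10ms]
    // (finest first) and max_ticks = 100, a requested duration of 250ms cannot be
    // reached at 1ms resolution (at most 100x1ms), but divides evenly at 10ms
    // resolution, so the exact setting 25x10ms is picked:
    //
    //   let config = TimerConfig::new_from_data(
    //       /*timer_id=*/ 0,
    //       &[zx::BootDuration::from_millis(1), zx::BootDuration::from_millis(10)],
    //       /*max_ticks=*/ 100,
    //   );
    //   assert_eq!(config.pick_setting(zx::BootDuration::from_millis(250)).ticks(), 25);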
883    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
884        assert!(self.resolutions.len() > 0, "there must be at least one supported resolution");
885
        // The driver does not support zero ticks, so schedule a single tick at the
        // finest resolution instead.
888        if duration <= zx::BootDuration::ZERO {
889            return TimerDuration::new(self.resolutions[0], 1);
890        }
891
892        //  0         |-------------->|<---------------|
893        //  |---------+---------------+----------------+---->
894        //  |---------^               |                |
895        //  | best positive slack     |                |
896        //  |-------------------------^ duration       |
897        //  |------------------------------------------^ best negative slack.
898        let mut best_positive_slack = TimerDuration::zero();
899        let mut best_negative_slack = TimerDuration::max();
900
901        if self.max_ticks == 0 {
902            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
903        }
904        let duration_slack: TimerDuration = duration.into();
905
906        for res1 in self.resolutions.iter() {
907            let smallest_unit = TimerDuration::new(*res1, 1);
908            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
909
910            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
911            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
912
913            if smallest_slack_larger_than_duration {
914                if duration_slack == TimerDuration::zero() {
915                    best_negative_slack = TimerDuration::zero();
916                } else if smallest_unit < best_negative_slack {
917                    best_negative_slack = smallest_unit;
918                }
919            }
920            if largest_slack_smaller_than_duration {
921                if max_tick_at_res > best_positive_slack
922                    || best_positive_slack == TimerDuration::zero()
923                {
924                    best_positive_slack = max_tick_at_res;
925                }
926            }
927
928            // "Regular" case.
929            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
930                // Check whether duration divides evenly into the available slack options
931                // for this resolution.  If it does, then that is the slack we're looking for.
932                let q = duration_slack / smallest_unit;
933                let d = smallest_unit * q;
934                if d == duration_slack {
935                    // Exact match, we can return right now.
936                    return d;
937                } else {
938                    // Not an exact match, so q ticks is before, but q+1 is after.
939                    if d > best_positive_slack {
940                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
941                    }
942                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
943                    if d_plus < best_negative_slack {
944                        best_negative_slack = d_plus;
945                    }
946                }
947            }
948        }
949
950        let p_slack = duration - best_positive_slack.duration();
951        let n_slack = best_negative_slack.duration() - duration;
952
953        // If the closest approximation is 0ns, then we can not advance time, so we reject it.
954        // Otherwise pick the smallest slack.  Note that when we pick the best positive slack,
955        // we will wake *before* the actual deadline.  In multi-resolution counters, this enables
956        // us to pick a finer count in the next go.
957        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
958            best_positive_slack
959        } else {
960            best_negative_slack
961        };
962        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
963        assert!(
964            ret.duration().into_nanos() >= 0,
965            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
966            ret,
967            format_duration(p_slack),
968            format_duration(n_slack),
969            format_duration(duration),
970            best_positive_slack,
971            best_negative_slack,
972            duration_slack,
973            p_slack != zx::BootDuration::ZERO,
974            p_slack,
975            zx::BootDuration::ZERO,
976        );
977        ret
978    }
979}
980
981async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
982    debug!("get_timer_properties: requesting timer properties.");
983    hrtimer.get_timer_properties().await
984}
985
986/// The state of a single hardware timer that we must bookkeep.
987struct TimerState {
988    // The task waiting for the proximate timer to expire.
989    task: fasync::Task<()>,
990    // The deadline that the above task is waiting for.
991    deadline: fasync::BootInstant,
992}
993
/// The command loop for timer interaction.  All changes to the wake alarm device
/// programming come in the form of commands through `cmds`.
///
/// Args:
/// - `scope`: the scope handle used to spawn hrtimer wait sub-tasks.
/// - `snd`: the send end of `cmds` below; a clone is given to each spawned sub-task.
/// - `cmds`: the input queue of alarm related commands.
/// - `timer_proxy`: the [TimerOps] implementation used to program the hardware device.
/// - `inspect`: the inspect node to record loop info into.
/// - `utc_transform`: the shared boot-to-UTC clock transformation, used to convert
///   UTC deadlines to boot deadlines.
1002async fn wake_timer_loop(
1003    scope: fasync::ScopeHandle,
1004    snd: mpsc::Sender<Cmd>,
1005    mut cmds: mpsc::Receiver<Cmd>,
1006    timer_proxy: Box<dyn TimerOps>,
1007    inspect: finspect::Node,
1008    utc_transform: Rc<RefCell<fxr::UtcClockTransform>>,
1009) {
1010    debug!("wake_timer_loop: started");
1011
1012    let mut timers = timers::Heap::new(utc_transform.clone());
1013    let timer_config = get_timer_properties(&timer_proxy).await;
1014
1015    // Keeps the currently executing HrTimer closure.  This is not read from, but
1016    // keeps the timer task active.
1017    #[allow(clippy::collection_is_never_read)]
1018    let mut hrtimer_status: Option<TimerState> = None;
1019
1020    // Initialize inspect properties. This must be done only once.
1021    //
1022    // Take note that these properties are updated when the `cmds` loop runs.
1023    // This means that repeated reads while no `cmds` activity occurs will return
1024    // old readings.  This is to ensure a consistent ability to replay the last
1025    // loop run if needed.
1026    let now_prop = inspect.create_int("now_ns", 0);
1027    let now_formatted_prop = inspect.create_string("now_formatted", "");
1028    let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1029    let pending_timers_prop = inspect.create_string("pending_timers", "");
1030    let _deadline_histogram_prop = inspect.create_int_exponential_histogram(
1031        "requested_deadlines_ns",
1032        finspect::ExponentialHistogramParams {
1033            floor: 0,
1034            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1035            // Allows capturing deadlines up to dozens of days.
1036            step_multiplier: 10,
1037            buckets: 16,
1038        },
1039    );
1040    let slack_histogram_prop = inspect.create_int_exponential_histogram(
1041        "slack_ns",
1042        finspect::ExponentialHistogramParams {
1043            floor: 0,
1044            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1045            step_multiplier: 10,
1046            buckets: 16,
1047        },
1048    );
1049    let schedule_delay_prop = inspect.create_int_exponential_histogram(
1050        "schedule_delay_ns",
1051        finspect::ExponentialHistogramParams {
1052            floor: 0,
1053            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1054            step_multiplier: 10,
1055            buckets: 16,
1056        },
1057    );
1058    let boot_deadlines_count_prop = inspect.create_uint("boot_deadlines_count", 0);
1059    let utc_deadlines_count_prop = inspect.create_uint("utc_deadlines_count", 0);
1060    // Internals of what was programmed into the wake alarms hardware.
1061    let hw_node = inspect.create_child("hardware");
1062    let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1063    let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1064
1065    // Debug nodes for b/454085350.
1066    let debug_node = inspect.create_child("debug_node");
1067    let start_notify_setup_count = debug_node.create_int("start_notify_setup", 0);
1068    let start_count = debug_node.create_int("start_count", 0);
1069    let responder_count = debug_node.create_int("responder_count", 0);
1070    let stop_count = debug_node.create_int("stop", 0);
1071    let stop_responder_count = debug_node.create_int("stop_responder", 0);
1072    let stop_hrtimer_count = debug_node.create_int("stop_hrtimer", 0);
1073    let schedule_hrtimer_count = debug_node.create_int("schedule_hrtimer", 0);
1074    let alarm_count = debug_node.create_int("alarm", 0);
1075    let alarm_fidl_count = debug_node.create_int("alarm_fidl", 0);
1076    let alarm_driver_count = debug_node.create_int("alarm_driver", 0);
1077    let utc_update_count = debug_node.create_int("utc_update", 0);
1078    let status_count = debug_node.create_int("status", 0);
1079    let loop_count = debug_node.create_int("loop_count", 0);
1080
1081    let hrtimer_node = debug_node.create_child("hrtimer");
1082
1083    while let Some(cmd) = cmds.next().await {
1084        let _i = ScopedInc::new(&loop_count);
1085        trace::duration!(c"alarms", c"Cmd");
1086        // Use a consistent notion of "now" across commands.
1087        let now = fasync::BootInstant::now();
1088        now_prop.set(now.into_nanos());
1089        trace::instant!(c"alarms", c"wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1090        match cmd {
1091            Cmd::Start { conn_id, deadline, mode, alarm_id, responder } => {
1092                let _i = ScopedInc::new(&start_count);
1093                trace::duration!(c"alarms", c"Cmd::Start");
1094                fuchsia_trace::flow_step!(
1095                    c"alarms",
1096                    c"hrtimer_lifecycle",
1097                    timers::get_trace_id(&alarm_id)
1098                );
1099                // NOTE: hold keep_alive until all work is done.
1100                debug!(
1101                    "wake_timer_loop: START alarm_id: \"{}\", conn_id: {:?}\n\tdeadline: {}\n\tnow:      {}",
1102                    alarm_id,
1103                    conn_id,
1104                    deadline,
1105                    format_timer(now.into()),
1106                );
1107
1108                defer! {
1109                    let _i = ScopedInc::new(&start_notify_setup_count);
1110                    // This is the only option that requires further action.
1111                    if let Some(mode) = mode {
1112                        if let fta::SetMode::NotifySetupDone(setup_done) = mode {
1113                            // Must signal once the setup is completed.
1114                            signal(&setup_done);
1115                            debug!("wake_timer_loop: START: setup_done signaled");
1116                        };
1117                    }
1118                }
1119                let deadline_boot = deadline.as_boot(&*utc_transform.borrow());
1120
1121                // TODO: b/444236931: re-enable.
1122                //// Bookkeeping, record the incidence of deadline types.
1123                //deadline_histogram_prop.insert((deadline_boot - now).into_nanos());
1124                match deadline {
1125                    timers::Deadline::Boot(_) => boot_deadlines_count_prop.add(1),
1126                    timers::Deadline::Utc(_) => utc_deadlines_count_prop.add(1),
1127                };
1128
1129                if timers::Heap::expired(now, deadline_boot) {
1130                    trace::duration!(c"alarms", c"Cmd::Start:immediate");
1131                    fuchsia_trace::flow_step!(
1132                        c"alarms",
1133                        c"hrtimer_lifecycle",
1134                        timers::get_trace_id(&alarm_id)
1135                    );
                    // A timer set to now or to a time in the past expires right away.
1137                    let (_lease, keep_alive) = zx::EventPair::create();
1138                    debug!(
1139                        "[{}] wake_timer_loop: bogus lease {:?}",
1140                        line!(),
1141                        &keep_alive.get_koid().unwrap()
1142                    );
1143
1144                    {
1145                        let _i1 = ScopedInc::new(&responder_count);
1146                        if let Err(e) = responder
1147                            .send(&alarm_id, Ok(keep_alive))
1148                            .expect("responder is always present")
1149                        {
1150                            error!(
1151                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: could not notify, dropping: {e}",
1152                            );
1153                        } else {
1154                            debug!(
1155                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: EXPIRED IMMEDIATELY\n\tdeadline({}) <= now({})\n\tfull deadline: {}",
1156                                format_timer(deadline_boot.into()),
1157                                format_timer(now.into()),
1158                                deadline,
1159                            )
1160                        }
1161                    }
1162                } else {
1163                    trace::duration!(c"alarms", c"Cmd::Start:regular");
1164                    fuchsia_trace::flow_step!(
1165                        c"alarms",
1166                        c"hrtimer_lifecycle",
1167                        timers::get_trace_id(&alarm_id)
1168                    );
1169                    // A timer scheduled for the future gets inserted into the timer heap.
1170                    let was_empty = timers.is_empty();
1171
1172                    let deadline_before = timers.peek_deadline_as_boot();
1173                    let node = match deadline {
1174                        timers::Deadline::Boot(_) => {
1175                            timers.new_node_boot(deadline_boot, alarm_id, conn_id, responder)
1176                        }
1177                        timers::Deadline::Utc(d) => {
1178                            timers.new_node_utc(d, alarm_id, conn_id, responder)
1179                        }
1180                    };
1181                    timers.push(node);
1182                    let deadline_after = timers.peek_deadline_as_boot();
1183
1184                    let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1185                    let needs_cancel = !was_empty && deadline_changed;
1186                    let needs_reschedule = was_empty || deadline_changed;
1187
1188                    if needs_reschedule {
1189                        // Always schedule the proximate deadline.
1190                        let schedulable_deadline = deadline_after.unwrap_or(deadline_boot);
1191                        if needs_cancel {
1192                            log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1193                        }
1194                        hrtimer_status = Some(log_long_op!(schedule_hrtimer(
1195                            scope.clone(),
1196                            now,
1197                            &timer_proxy,
1198                            schedulable_deadline,
1199                            snd.clone(),
1200                            &timer_config,
1201                            &schedule_delay_prop,
1202                            &hrtimer_node,
1203                        )));
1204                    }
1205                }
1206            }
1207            Cmd::StopById { timer_id, done } => {
1208                let _i = ScopedInc::new(&stop_count);
1209                defer! {
1210                    signal(&done);
1211                }
1212                trace::duration!(c"alarms", c"Cmd::StopById", "alarm_id" => timer_id.alarm());
1213                fuchsia_trace::flow_step!(
1214                    c"alarms",
1215                    c"hrtimer_lifecycle",
1216                    timers::get_trace_id(&timer_id.alarm())
1217                );
1218                debug!("wake_timer_loop: STOP timer: {}", timer_id);
1219                let deadline_before = timers.peek_deadline_as_boot();
1220
1221                if let Some(timer_node) = timers.remove_by_id(&timer_id) {
1222                    let deadline_after = timers.peek_deadline_as_boot();
1223
1224                    {
1225                        let _i = ScopedInc::new(&stop_responder_count);
1226                        if let Some(res) = timer_node
1227                            .get_responder()
1228                            .send(timer_node.id().alarm(), Err(fta::WakeAlarmsError::Dropped))
1229                        {
1230                            // We must reply to the responder to keep the connection open.
1231                            res.expect("infallible");
1232                        }
1233                    }
1234                    if is_deadline_changed(deadline_before, deadline_after) {
1235                        let _i = ScopedInc::new(&stop_hrtimer_count);
1236                        log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1237                    }
1238                    if let Some(deadline) = deadline_after {
1239                        let _i = ScopedInc::new(&schedule_hrtimer_count);
1240                        // Reschedule the hardware timer if the removed timer is the earliest one,
1241                        // and another one exists.
1242                        let new_timer_state = log_long_op!(schedule_hrtimer(
1243                            scope.clone(),
1244                            now,
1245                            &timer_proxy,
1246                            deadline,
1247                            snd.clone(),
1248                            &timer_config,
1249                            &schedule_delay_prop,
1250                            &hrtimer_node,
1251                        ));
1252                        let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1253                        if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1254                            // Allow the task to complete. Since this task should have been
1255                            // canceled or completed already, this call should not block for
1256                            // a long time.
1257                            log_long_op!(task);
1258                        }
1259                    } else {
1260                        // No next timer, clean up the hrtimer status.
1261                        hrtimer_status = None;
1262                    }
1263                } else {
1264                    debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1265                }
1266            }
1267            Cmd::Alarm { expired_deadline, keep_alive } => {
1268                let _i = ScopedInc::new(&alarm_count);
1269
1270                trace::duration!(c"alarms", c"Cmd::Alarm");
1271                // Expire all eligible timers, based on "now".  This is because
1272                // we may have woken up earlier than the actual deadline. This
                // happens, for example, if the timer cannot reach the actual
1274                // deadline and needs to be re-programmed.
1275                debug!(
1276                    "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1277                    format_timer(expired_deadline.into()),
1278                    keep_alive.get_koid().unwrap(),
1279                );
1280                let expired_count =
1281                    notify_all(&mut timers, &keep_alive, now, &slack_histogram_prop)
1282                        .expect("notification succeeds");
1283                if expired_count == 0 {
1284                    // This could be a resolution switch, or a straggler notification.
1285                    // Either way, the hardware timer is still ticking, so cancel it.
1286                    debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1287                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1288                }
1289                // If another timer is pending, reschedule the hardware timer now.
1290                hrtimer_status = match timers.peek_deadline_as_boot() {
1291                    None => None,
1292                    Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1293                        scope.clone(),
1294                        now,
1295                        &timer_proxy,
1296                        deadline,
1297                        snd.clone(),
1298                        &timer_config,
1299                        &schedule_delay_prop,
1300                        &hrtimer_node,
1301                    ))),
1302                }
1303            }
1304            Cmd::AlarmFidlError { expired_deadline, error } => {
1305                let _i = ScopedInc::new(&alarm_fidl_count);
1306
1307                trace::duration!(c"alarms", c"Cmd::AlarmFidlError");
1308                // We do not have a wake lease, so the system may sleep before
1309                // we get to schedule a new timer. We have no way to avoid it
1310                // today.
1311                warn!(
1312                    "wake_timer_loop: FIDL error: {:?}, deadline: {}, now: {}",
1313                    error,
1314                    format_timer(expired_deadline.into()),
1315                    format_timer(now.into()),
1316                );
1317                // Manufacture a fake lease so the notification code below can run unchanged.
1318                // TODO: consider using an Option instead.
1319                let (_dummy_lease, peer) = zx::EventPair::create();
1320                debug!(
1321                    "bogus lease: {:?} fidl error [{}:{}]",
1322                    &peer.get_koid().unwrap(),
1323                    file!(),
1324                    line!()
1325                );
1326                notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1327                    .expect("notification succeeds");
1328                hrtimer_status = match timers.peek_deadline_as_boot() {
1329                    None => None, // No remaining timers, nothing to schedule.
1330                    Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1331                        scope.clone(),
1332                        now,
1333                        &timer_proxy,
1334                        deadline,
1335                        snd.clone(),
1336                        &timer_config,
1337                        &schedule_delay_prop,
1338                        &hrtimer_node,
1339                    ))),
1340                }
1341            }
1342            Cmd::AlarmDriverError {
1343                expired_deadline,
1344                error,
1345                timer_config_id,
1346                resolution_nanos,
1347                ticks,
1348            } => {
1349                let _i = ScopedInc::new(&alarm_driver_count);
1350
1351                trace::duration!(c"alarms", c"Cmd::AlarmDriverError");
1352                let (_dummy_lease, peer) = zx::EventPair::create();
1353                debug!(
1354                    "bogus lease: {:?} driver error. [{}:{}]",
1355                    &peer.get_koid().unwrap(),
1356                    file!(),
1357                    line!()
1358                );
1359                notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1360                    .expect("notification succeeds");
1361                match error {
1362                    fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1363                        // Nothing to do here; cancellation is handled by the Stop code.
1364                        debug!(
1365                            "wake_timer_loop: CANCELED timer at deadline: {}",
1366                            format_timer(expired_deadline.into())
1367                        );
1368                    }
1369                    _ => {
1370                        error!(
1371                            "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}\n\ttimer_id={}\n\tresolution={}\n\tticks={}",
1372                            error,
1373                            format_timer(expired_deadline.into()),
1374                            format_timer(now.into()),
1375                            timer_config_id,
1376                            resolution_nanos,
1377                            ticks,
1378                        );
1379                        // We do not have a wake lease, so the system may sleep before
1380                        // we get to schedule a new timer. We have no way to avoid it
1381                        // today.
1382                        hrtimer_status = match timers.peek_deadline_as_boot() {
1383                            None => None,
1384                            Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1385                                scope.clone(),
1386                                now,
1387                                &timer_proxy,
1388                                deadline,
1389                                snd.clone(),
1390                                &timer_config,
1391                                &schedule_delay_prop,
1392                                &hrtimer_node,
1393                            ))),
1394                        }
1395                    }
1396                }
1397            }
1398            Cmd::UtcUpdated { transform } => {
1399                let _i = ScopedInc::new(&utc_update_count);
1400
1401                trace::duration!(c"alarms", c"Cmd::UtcUpdated");
1402                debug!("wake_timer_loop: applying new clock transform: {transform:?}");
1403
1404                // Assigning to this shared reference updates the deadlines of all
1405                // UTC timers.
1406                *utc_transform.borrow_mut() = transform;
1407
1408                // Reschedule the hardware timer with the now-current deadline if there is an
1409                // active timer.
1410                if hrtimer_status.is_some() {
1411                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1412                    // TODO: determine whether a wake lease should be requested here.
1413                    hrtimer_status = match timers.peek_deadline_as_boot() {
1414                        None => None,
1415                        Some(deadline) => Some(log_long_op!(schedule_hrtimer(
1416                            scope.clone(),
1417                            now,
1418                            &timer_proxy,
1419                            deadline,
1420                            snd.clone(),
1421                            &timer_config,
1422                            &schedule_delay_prop,
1423                            &hrtimer_node,
1424                        ))),
1425                    }
1426                }
1427            }
1428        }
1429
1430        {
1431            let _i = ScopedInc::new(&status_count);
1432
1433            // Print and record diagnostics after each iteration, and record the
1434            // duration for performance awareness.  Note that iterations happen
1435            // only occasionally, so these stats can remain unchanged for a long
1436            // time.
1437            trace::duration!(c"timekeeper", c"inspect");
1438            let now_formatted = format_timer(now.into());
1439            debug!("wake_timer_loop: now:                             {}", &now_formatted);
1440            now_formatted_prop.set(&now_formatted);
1441
1442            let pending_timers_count: u64 =
1443                timers.timer_count().try_into().expect("always convertible");
1444            debug!("wake_timer_loop: currently pending timer count:   {}", pending_timers_count);
1445            pending_timers_count_prop.set(pending_timers_count);
1446
1447            let pending_timers = format!("{}", timers);
1448            debug!("wake_timer_loop: currently pending timers:        \n\t{}", &timers);
1449            pending_timers_prop.set(&pending_timers);
1450
1451            let current_deadline: String = hrtimer_status
1452                .as_ref()
1453                .map(|s| format!("{}", format_timer(s.deadline.into())))
1454                .unwrap_or_else(|| "(none)".into());
1455            debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
1456            current_hw_deadline_prop.set(&current_deadline);
1457
1458            let remaining_duration_until_alarm = hrtimer_status
1459                .as_ref()
1460                .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1461                .unwrap_or_else(|| "(none)".into());
1462            debug!(
1463                "wake_timer_loop: remaining duration until alarm:  {}",
1464                &remaining_duration_until_alarm
1465            );
1466            remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1467            debug!("---");
1468        }
1469    }
1470
1471    // Prod code should never see this loop exit: the wake alarm manager
1472    // should run forever.
1473    log::info!("wake_timer_loop: exiting. This is only correct in test code.");
1474}
1475
1476/// Schedules a wake alarm.
1477///
1478/// # Args:
1479///
1480/// - `scope`: used to spawn async tasks.
1481/// - `now`: the time instant to use as the current instant.
1482/// - `hrtimer`: the proxy for the hrtimer device driver.
1483/// - `deadline`: the time instant in the future at which the alarm should fire.
1484/// - `command_send`: the sender channel to use when the timer expires.
1485/// - `timer_config`: a configuration of the hardware timer showing supported resolutions and
1486///   max tick value.
1487/// - `schedule_delay_histogram`: inspect instrumentation.
1488/// - `debug_node`: used for keeping debug counters.
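///
/// # Example (illustrative only)
///
/// A minimal sketch of how the wake loop calls this function. The values
/// (`scope`, `timer_proxy`, `snd`, `timer_config`, `schedule_delay_prop`,
/// `hrtimer_node`) are assumed to already exist in the caller's scope, as
/// they do in `wake_timer_loop`:
///
/// ```ignore
/// let now = fasync::BootInstant::now();
/// let deadline = now + zx::BootDuration::from_millis(100);
/// let new_timer_state = schedule_hrtimer(
///     scope.clone(),
///     now,
///     &timer_proxy,
///     deadline,
///     snd.clone(),
///     &timer_config,
///     &schedule_delay_prop,
///     &hrtimer_node,
/// )
/// .await;
/// // `new_timer_state.task` completes once the hrtimer fires or reports an error.
/// ```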
1489async fn schedule_hrtimer(
1490    scope: fasync::ScopeHandle,
1491    now: fasync::BootInstant,
1492    hrtimer: &Box<dyn TimerOps>,
1493    deadline: fasync::BootInstant,
1494    mut command_send: mpsc::Sender<Cmd>,
1495    timer_config: &TimerConfig,
1496    _schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
1497    debug_node: &finspect::Node,
1498) -> TimerState {
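    // Clamp to zero so a deadline that is already in the past still schedules
    // an (effectively immediate) hardware timer rather than a negative timeout.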
1499    let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
1500    trace::duration!(c"alarms", c"schedule_hrtimer", "timeout" => timeout.into_nanos());
1501    // When signaled, the hrtimer has been scheduled.
1502    let hrtimer_scheduled = zx::Event::create();
1503
1504    let schedule_count = debug_node.create_int("schedule", 0);
1505    let hrtimer_wait_count = debug_node.create_int("hrtimer_wait", 0);
1506    let wait_signaled_count = debug_node.create_int("wait_signaled", 0);
1507
1508    let _sc = ScopedInc::new(&schedule_count);
1509
1510    debug!(
1511        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
1512        format_timer(now.into()),
1513        format_timer(deadline.into()),
1514        format_duration(timeout),
1515    );
1516
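    // Translate the requested timeout into a (resolution, ticks) pair that the
    // hardware timer actually supports.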
1517    let slack = timer_config.pick_setting(timeout);
1518    let resolution_nanos = slack.resolution.into_nanos();
1519    // The driver will reject "0" ticks, even though it probably shouldn't.
1520    // See b/437177931 for details.
1521    let useful_ticks = std::cmp::max(MIN_USEFUL_TICKS, slack.ticks());
1522
1523    trace::instant!(c"alarms", c"hrtimer:programmed",
1524        trace::Scope::Process,
1525        "resolution_ns" => resolution_nanos,
1526        "ticks" => useful_ticks
1527    );
1528    let timer_config_id = timer_config.id;
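    // Issue the driver call here, but await its response in the task spawned on
    // `scope` below, so this function can return as soon as the timer has been
    // confirmed as scheduled (via `hrtimer_scheduled`).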
1529    let start_and_wait_fut = {
1530        let _sc = ScopedInc::new(&hrtimer_wait_count);
1531        hrtimer.start_and_wait(
1532            timer_config.id,
1533            &ffhh::Resolution::Duration(resolution_nanos),
1534            useful_ticks,
1535            clone_handle(&hrtimer_scheduled),
1536        )
1537    };
1538
1539    let hrtimer_scheduled_if_error = clone_handle(&hrtimer_scheduled);
1540    let hrtimer_task = scope.spawn_local(async move {
1541        debug!("hrtimer_task: waiting for hrtimer driver response");
1542        trace::instant!(c"alarms", c"hrtimer:started", trace::Scope::Process);
1543        let response = start_and_wait_fut.await;
1544        trace::instant!(c"alarms", c"hrtimer:response", trace::Scope::Process);
1545        match response {
1546            Err(TimerOpsError::Fidl(e)) => {
1547                defer! {
1548                    // Signal anyway, so the waiter on hrtimer_scheduled can proceed.
1549                    signal(&hrtimer_scheduled_if_error);
1550                }
1551                trace::instant!(c"alarms", c"hrtimer:response:fidl_error", trace::Scope::Process);
1552                warn!("hrtimer_task: hrtimer FIDL error: {:?}", e);
1553                command_send
1554                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
1555                    .unwrap();
1556                // BAD: no way to keep alive.
1557            }
1558            Err(TimerOpsError::Driver(e)) => {
1559                defer! {
1560                    // This should be idempotent if the error occurs after
1561                    // the timer was scheduled.
1562                    signal(&hrtimer_scheduled_if_error);
1563                }
1564                let driver_error_str = format!("{:?}", e);
1565                trace::instant!(c"alarms", c"hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
1566                // This is very common. For example, a "timer canceled" event
1567                // will result in this code path being hit.
1568                debug!("schedule_hrtimer: hrtimer driver error: {:?}", e);
1569                command_send
1570                    .start_send(Cmd::AlarmDriverError {
1571                        expired_deadline: now,
1572                        error: e,
1573                        timer_config_id,
1574                        resolution_nanos,
1575                        ticks: useful_ticks,
1576                    })
1577                    .unwrap();
1578                // BAD: no way to keep alive.
1579            }
1580            Ok(keep_alive) => {
1581                trace::instant!(c"alarms", c"hrtimer:response:alarm", trace::Scope::Process);
1582                debug!("hrtimer: got alarm response: {:?}", keep_alive);
1583                // May trigger sooner than the deadline.
1584                command_send
1585                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
1586                    .unwrap();
1587            }
1588        }
1589        debug!("hrtimer_task: exiting task.");
1590        trace::instant!(c"alarms", c"hrtimer:task_exit", trace::Scope::Process);
1591    }).into();
1592    debug!("schedule_hrtimer: waiting for event to be signaled");
1593
1594    {
1595        let _i = ScopedInc::new(&wait_signaled_count);
1596        // We must wait here to ensure that the wake alarm has been scheduled.
1597        log_long_op!(wait_signaled(&hrtimer_scheduled));
1598    }
1599
1600    let now_after_signaled = fasync::BootInstant::now();
1601    let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
1602    if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
1603        trace::duration!(c"alarms", c"schedule_hrtimer:unusual_duration",
1604            "duration" => duration_until_scheduled.into_nanos());
1605        warn!(
1606            "unusual duration until hrtimer scheduled: {}",
1607            format_duration(duration_until_scheduled)
1608        );
1609    }
1610    // TODO: b/444236931: re-enable.
1611    //schedule_delay_histogram.insert(duration_until_scheduled.into_nanos());
1612    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
1613    TimerState { task: hrtimer_task, deadline }
1614}
1615
1616/// Notify all `timers` that `reference_instant` has been reached.
1617///
1618/// The notified `timers` are removed from the list of timers to notify.
1619///
1620/// Args:
1621/// - `timers`: the collection of currently available timers.
1622/// - `lease_prototype`: an EventPair used as a wake lease.
1623/// - `reference_instant`: the time instant used as a reference for alarm notification.
1624///   All timers with deadlines at or before this instant are expired and notified.
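///
/// # Example (illustrative only)
///
/// A sketch of the call as it appears in `wake_timer_loop`; `timers`,
/// `keep_alive`, `now`, and `slack_histogram_prop` are assumed to exist in the
/// caller's scope:
///
/// ```ignore
/// let expired_count =
///     notify_all(&mut timers, &keep_alive, now, &slack_histogram_prop)
///         .expect("notification succeeds");
/// if expired_count == 0 {
///     // Nothing expired; the hardware timer may still be ticking.
/// }
/// ```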
1625fn notify_all(
1626    timers: &mut timers::Heap,
1627    lease_prototype: &zx::EventPair,
1628    reference_instant: fasync::BootInstant,
1629    _unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1630) -> Result<usize> {
1631    trace::duration!(c"alarms", c"notify_all");
1632    let now = fasync::BootInstant::now();
1633    let mut expired = 0;
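    // Each timer returned by `maybe_expire_earliest` has already been removed
    // from the heap; the loop below notifies it and hands it a clone of the
    // wake lease.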
1634    while let Some(timer_node) = timers.maybe_expire_earliest(reference_instant) {
1635        expired += 1;
1636        // Determine how much later than requested the notification happened.
1637        let deadline = timer_node.get_boot_deadline();
1638        let alarm_id = timer_node.id().alarm().to_string();
1639        trace::duration!(c"alarms", c"notify_all:notified", "alarm_id" => &*alarm_id);
1640        fuchsia_trace::flow_step!(c"alarms", c"hrtimer_lifecycle", timers::get_trace_id(&alarm_id));
1641        let conn_id = timer_node.id().conn.clone();
1642        let slack: zx::BootDuration = deadline - now;
1643        if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1644            trace::duration!(c"alarms", c"schedule_hrtimer:unusual_slack", "slack" => slack.into_nanos());
1645            // This alarm triggered noticeably later than it should have.
1646            warn!(
1647                "alarm id: {} had an unusually large slack: {}",
1648                alarm_id,
1649                format_duration(slack)
1650            );
1651        }
1652        if slack < zx::BootDuration::ZERO {
1653            // TODO: b/444236931: re-enable.
1654            //unusual_slack_histogram.insert(-slack.into_nanos());
1655        }
1656        debug!(
1657            concat!(
1658                "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tconn_id: {:?},\n\t",
1659                "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1660            ),
1661            alarm_id,
1662            format_timer(deadline.into()),
1663            conn_id,
1664            format_timer(reference_instant.into()),
1665            format_timer(now.into()),
1666            format_duration(slack),
1667        );
1668        let lease = clone_handle(lease_prototype);
1669        trace::instant!(c"alarms", c"notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "conn_id" => conn_id);
1670        if let Some(Err(e)) = timer_node.get_responder().send(timer_node.id().alarm(), Ok(lease)) {
1671            error!("could not signal responder: {:?}", e);
1672        }
1673        trace::instant!(c"alarms", c"notified", trace::Scope::Process);
1674    }
1675    trace::instant!(c"alarms", c"notify", trace::Scope::Process, "expired_count" => expired);
1676    debug!("notify_all: expired count: {}", expired);
1677    Ok(expired)
1678    // Note: a new hardware timer is not scheduled here; the caller does that.
1679}
1680
1681/// The hrtimer driver service directory.  hrtimer driver APIs appear as randomly
1682/// named files in this directory. They are expected to come and go.
1683const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1684
1685/// Connects to the high resolution timer device driver.
1686///
1687/// This function watches the hrtimer device directory and connects to the first
1688/// available hrtimer device.
1689///
1690/// # Returns
1691/// A `Result` containing a `ffhh::DeviceProxy` on success, or an error if
1692/// the connection fails.
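///
/// # Example (illustrative only)
///
/// A sketch of typical use; error handling is elided:
///
/// ```ignore
/// let hrtimer_proxy = connect_to_hrtimer_async()
///     .await
///     .expect("hrtimer driver should be available");
/// // Hand `hrtimer_proxy` to the alarm management loop.
/// ```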
1693pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1694    debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1695    let dir =
1696        fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1697            .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1698    let path = device_watcher::watch_for_files(&dir)
1699        .await
1700        .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1701        .try_next()
1702        .await
1703        .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1704    let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1705    let path = path
1706        .to_str()
1707        .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1708    connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1709        .context("Failed to connect to built-in service")
1710}
1711
1712#[cfg(test)]
1713mod tests {
1714    use super::*;
1715    use assert_matches::assert_matches;
1716    use diagnostics_assertions::{AnyProperty, assert_data_tree};
1717    use fuchsia_async::TestExecutor;
1718    use futures::select;
1719    use test_case::test_case;
1720    use test_util::{assert_gt, assert_lt};
1721
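    // Creates an EventPair and returns one end as a stand-in for a real wake
    // lease token in tests; the other end is intentionally dropped.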
1722    fn fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1723        let (_lease, peer) = zx::EventPair::create();
1724        peer
1725    }
1726
1727    #[test]
1728    fn timer_duration_no_overflow() {
1729        let duration1 = TimerDuration {
1730            resolution: zx::BootDuration::from_seconds(100_000_000),
1731            ticks: u64::MAX,
1732        };
1733        let duration2 = TimerDuration {
1734            resolution: zx::BootDuration::from_seconds(110_000_000),
1735            ticks: u64::MAX,
1736        };
1737        assert_eq!(duration1, duration1);
1738        assert_eq!(duration2, duration2);
1739
1740        assert_lt!(duration1, duration2);
1741        assert_gt!(duration2, duration1);
1742    }
1743
1744    #[test_case(
1745        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1746        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1747    )]
1748    #[test_case(
1749        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
1750        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1751    )]
1752    #[test_case(
1753        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1754        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
1755    )]
1756    #[test_case(
1757        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
1758        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
1759    )]
1760    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
1761        assert_eq!(one, other);
1762    }
1763
1764    #[test_case(
1765        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1766        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
1767    )]
1768    #[test_case(
1769        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1770        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1771    )]
1772    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
1773        assert_lt!(one, other);
1774    }
1775
1776    #[test_case(
1777        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
1778        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1779    )]
1780    #[test_case(
1781        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1782        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1783    )]
1784    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
1785        assert_gt!(one, other);
1786    }
1787
1788    #[test_case(
1789        vec![zx::BootDuration::from_nanos(1)],
1790        100,
1791        zx::BootDuration::from_nanos(0),
1792        TimerDuration::new(zx::BootDuration::from_nanos(1), 1) ; "0ns becomes 1ns"
1793    )]
1794    #[test_case(
1795        vec![zx::BootDuration::from_nanos(1)],
1796        100,
1797        zx::BootDuration::from_nanos(50),
1798        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
1799    )]
1800    #[test_case(
1801        vec![zx::BootDuration::from_nanos(2)],
1802        100,
1803        zx::BootDuration::from_nanos(50),
1804        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
1805    )]
1806    #[test_case(
1807        vec![zx::BootDuration::from_nanos(3)],
1808        100,
1809        zx::BootDuration::from_nanos(50),
1810        // The closest duration is 51ns.
1811        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
1812    )]
1813    #[test_case(
1814        vec![
1815            zx::BootDuration::from_nanos(3),
1816            zx::BootDuration::from_nanos(4)
1817        ],
1818        100,
1819        zx::BootDuration::from_nanos(50),
1820        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
1821    )]
1822    #[test_case(
1823        vec![
1824            zx::BootDuration::from_nanos(1000),
1825        ],
1826        100,
1827        zx::BootDuration::from_nanos(50),
1828        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
1829        "950ns negative slack is the best we can do"
1830    )]
1831    #[test_case(
1832        vec![
1833            zx::BootDuration::from_nanos(1),
1834        ],
1835        10,
1836        zx::BootDuration::from_nanos(50),
1837        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
1838        "10ns positive slack is the best we can do"
1839    )]
1840    #[test_case(
1841        vec![
1842            zx::BootDuration::from_millis(1),
1843            zx::BootDuration::from_micros(100),
1844            zx::BootDuration::from_micros(10),
1845            zx::BootDuration::from_micros(1),
1846        ],
1847        20,  // Make only one of the resolutions above match.
1848        zx::BootDuration::from_micros(150),
1849        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
1850        "Realistic case with resolutions from driver, should be 15us"
1851    )]
1852    #[test_case(
1853        vec![
1854            zx::BootDuration::from_millis(1),
1855            zx::BootDuration::from_micros(100),
1856            zx::BootDuration::from_micros(10),
1857            zx::BootDuration::from_micros(1),
1858        ],
1859        2000,  // Make only one of the resolutions above match.
1860        zx::BootDuration::from_micros(6000),
1861        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
1862        "Coarser exact unit wins"
1863    )]
1864    #[test_case(
1865        vec![
1866            zx::BootDuration::from_millis(1),
1867            zx::BootDuration::from_millis(10),
1868            zx::BootDuration::from_millis(100),
1869        ],
1870        1000,
1871        zx::BootDuration::from_micros(-10),
1872        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
1873        "Negative duration gets the smallest timer duration"
1874    )]
1875    #[test_case(
1876        vec![
1877            zx::BootDuration::from_millis(1),
1878            zx::BootDuration::from_millis(10),
1879            zx::BootDuration::from_millis(100),
1880        ],
1881        1000,
1882        zx::BootDuration::ZERO,
1883        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
1884        "Zero duration gets the smallest timer duration"
1885    )]
1886    fn test_pick_setting(
1887        resolutions: Vec<zx::BootDuration>,
1888        max_ticks: u64,
1889        duration: zx::BootDuration,
1890        expected: TimerDuration,
1891    ) {
1892        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
1893        let actual = config.pick_setting(duration);
1894
1895        // .eq() does not work here, since we do not just require that the values
1896        // be equal, but also that the same resolution is used in both.
1897        assert_slack_eq(expected, actual);
1898    }
1899
1900    // TimerDuration assertion with human-friendly output in case of an error.
1901    fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
1902        let slack = expected.duration() - actual.duration();
1903        assert_eq!(
1904            actual.resolution(),
1905            expected.resolution(),
1906            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1907            expected,
1908            format_duration(expected.duration()),
1909            actual,
1910            format_duration(actual.duration()),
1911            format_duration(slack)
1912        );
1913        assert_eq!(
1914            actual.ticks(),
1915            expected.ticks(),
1916            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1917            expected,
1918            format_duration(expected.duration()),
1919            actual,
1920            format_duration(actual.duration()),
1921            format_duration(slack)
1922        );
1923    }
1924
1925    #[derive(Debug)]
1926    enum FakeCmd {
1927        SetProperties {
1928            resolutions: Vec<zx::BootDuration>,
1929            max_ticks: i64,
1930            keep_alive: zx::EventPair,
1931            done: zx::Event,
1932        },
1933    }
1934
1935    use std::cell::RefCell;
1936    use std::rc::Rc;
1937
1938    // A fake that emulates some aspects of the hrtimer driver.
1939    //
1940    // Specifically, it can be configured with different resolutions, and will
1941    // bomb out if any waiting method is called twice in succession without
1942    // canceling the timer in between.
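    //
    // Typical wiring, as done by `TestContext::new` below (sketch only):
    //
    //   let (tx, rx) = mpsc::channel::<FakeCmd>(0);
    //   let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);
    //   // ... then send FakeCmd::SetProperties over `tx` before scheduling alarms.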
1943    fn fake_hrtimer_connection(
1944        scope: fasync::ScopeHandle,
1945        rcv: mpsc::Receiver<FakeCmd>,
1946    ) -> ffhh::DeviceProxy {
1947        debug!("fake_hrtimer_connection: entry.");
1948        let (hrtimer, mut stream) =
1949            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
1950        scope.clone().spawn_local(async move {
1951            let mut rcv = rcv.fuse();
1952            let timer_properties = Rc::new(RefCell::new(None));
1953            let wake_lease = Rc::new(RefCell::new(None));
1954
1955            // Set to true when the hardware timer is supposed to be running.
1956            // The hardware timer may not be reprogrammed without canceling it first;
1957            // make sure the tests fail the same way as production would.
1958            let timer_running = Rc::new(RefCell::new(false));
1959
1960            loop {
1961                let timer_properties = timer_properties.clone();
1962                let wake_lease = wake_lease.clone();
1963                select! {
1964                    cmd = rcv.next() => {
1965                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
1966                        match cmd {
1967                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
1968                                let mut timer_props = vec![];
1969                                for v in 0..10 {
1970                                    timer_props.push(ffhh::TimerProperties {
1971                                        supported_resolutions: Some(
1972                                            resolutions.iter()
1973                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
1974                                        max_ticks: Some(max_ticks.try_into().unwrap()),
1975                                        // The start_and_wait method is supported.
1976                                        supports_wait: Some(true),
1977                                        id: Some(v),
1978                                        ..Default::default()
1979                                        },
1980                                    );
1981                                }
1982                                *timer_properties.borrow_mut() = Some(timer_props);
1983                                *wake_lease.borrow_mut() = Some(keep_alive);
1984                                debug!("set timer properties to: {:?}", timer_properties);
1985                                signal(&done);
1986                            }
1987                            e => {
1988                                panic!("unrecognized command: {:?}", e);
1989                            }
1990                        }
1991                        // Set some responses if we have them.
1992                    },
1993                    event = stream.next() => {
1994                        debug!("fake_hrtimer_connection: event: {:?}", event);
1995                        if let Some(Ok(event)) = event {
1996                            match event {
1997                                ffhh::DeviceRequest::Start { responder, .. } => {
1998                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1999                                    *timer_running.borrow_mut() = true;
2000                                    responder.send(Ok(())).expect("");
2001                                }
2002                                ffhh::DeviceRequest::Stop { responder, .. } => {
2003                                    *timer_running.borrow_mut() = false;
2004                                    responder.send(Ok(())).expect("");
2005                                }
2006                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
2007                                    responder.send(Ok(1)).expect("");
2008                                }
2009                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
2010                                    responder.send(Ok(())).expect("");
2011                                }
2012                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
2013                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2014                                    *timer_running.borrow_mut() = true;
2015                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
2016                                    let ticks: i64 = ticks.try_into().unwrap();
2017                                    let sleep_duration  = zx::BootDuration::from_nanos(ticks * match resolution {
2018                                        ffhh::Resolution::Duration(e) => e,
2019                                        _ => {
2020                                            error!("resolution has an unexpected value");
2021                                            1
2022                                        }
2023                                    });
2024                                    let timer_running_clone = timer_running.clone();
2025                                    scope.spawn_local(async move {
2026                                        // Signaling the setup event allows the client to proceed
2027                                        // with post-scheduling work.
2028                                        signal(&setup_event);
2029
2030                                        // Respond after the requested sleep time. In tests, this
2031                                        // sleeps in fake time.
2032                                        fasync::Timer::new(sleep_duration).await;
2033                                        *timer_running_clone.borrow_mut() = false;
2034                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
2035                                        debug!("StartAndWait: hrtimer expired");
2036                                    });
2037                                }
2038                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
2039                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2040                                    *timer_running.borrow_mut() = true;
2041                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
2042                                }
2043                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
2044                                    if (*timer_properties).borrow().is_none() {
2045                                        error!("timer_properties is empty, this is not what you want!");
2046                                    }
2047                                    responder
2048                                        .send(ffhh::Properties {
2049                                            timers_properties: (*timer_properties).borrow().clone(),
2050                                            ..Default::default()
2051                                        })
2052                                        .expect("");
2053                                }
2054                                ffhh::DeviceRequest::ReadTimer { responder, .. } => {
2055                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2056                                }
2057                                ffhh::DeviceRequest::ReadClock { responder, .. } => {
2058                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2059                                }
2060                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
2061                            }
2062                        }
2063                    },
2064                }
2065            }
2066        });
2067        hrtimer
2068    }
2069
2070    fn clone_utc_clock(orig: &fxr::UtcClock) -> fxr::UtcClock {
2071        orig.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2072    }
2073
2074    struct TestContext {
2075        wake_proxy: fta::WakeAlarmsProxy,
2076        _scope: fasync::Scope,
2077        _cmd_tx: mpsc::Sender<FakeCmd>,
2078        // Used to manipulate the UTC clock from the test.
2079        utc_clock: fxr::UtcClock,
2080        utc_backstop: fxr::UtcInstant,
2081    }
2082
2083    impl TestContext {
2084        async fn new() -> Self {
2085            TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(0)).await;
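            // Fake time now starts from a known origin, so the absolute
            // nanosecond deadlines used in the tests below are deterministic.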
2086
2087            let scope = fasync::Scope::new();
2088            let utc_backstop = fxr::UtcInstant::from_nanos(1000);
2089            let utc_clock =
2090                fxr::UtcClock::create(zx::ClockOpts::empty(), Some(utc_backstop)).unwrap();
2091            let utc_clone = clone_utc_clock(&utc_clock);
2092            let (mut cmd_tx, wake_proxy) = {
2093                let (tx, rx) = mpsc::channel::<FakeCmd>(0);
2094                let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);
2095
2096                let inspector = finspect::component::inspector();
2097                let alarms = Rc::new(Loop::new(
2098                    scope.to_handle(),
2099                    hrtimer_proxy,
2100                    inspector.root().create_child("test"),
2101                    utc_clone,
2102                ));
2103
2104                let (proxy, stream) =
2105                    fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2106                scope.spawn_local(async move {
2107                    serve(alarms, stream).await;
2108                });
2109                (tx, proxy)
2110            };
2111
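            // Hand the fake driver its supported resolutions and tick limit
            // before any alarms are scheduled.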
2112            let (_wake_lease, peer) = zx::EventPair::create();
2113            let done = zx::Event::create();
2114            cmd_tx
2115                .start_send(FakeCmd::SetProperties {
2116                    resolutions: vec![zx::BootDuration::from_nanos(1)],
2117                    max_ticks: 100,
2118                    keep_alive: peer,
2119                    done: done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2120                })
2121                .unwrap();
2122
2123            // Wait until hrtimer configuration has completed.
2124            assert_matches!(fasync::OnSignals::new(done, zx::Signals::EVENT_SIGNALED).await, Ok(_));
2125
2126            Self { wake_proxy, _scope: scope, _cmd_tx: cmd_tx, utc_clock, utc_backstop }
2127        }
2128    }
2129
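    // Each test is expected to consume or cancel every timer it creates; a
    // lingering timer at drop time usually means a missed expiration in the test.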
2130    impl Drop for TestContext {
2131        fn drop(&mut self) {
2132            assert_matches!(TestExecutor::next_timer(), None, "Unexpected lingering timers");
2133        }
2134    }
2135
2136    #[fuchsia::test(allow_stalls = false)]
2137    async fn test_basic_timed_wait() {
2138        let ctx = TestContext::new().await;
2139
2140        let deadline = zx::BootInstant::from_nanos(100);
2141        let setup_done = zx::Event::create();
2142        let mut set_task = ctx.wake_proxy.set_and_wait(
2143            deadline.into(),
2144            fta::SetMode::NotifySetupDone(
2145                setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2146            ),
2147            "Hello".into(),
2148        );
2149
2150        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task).await, Poll::Pending);
2151
2152        let mut setup_done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2153        assert_matches!(
2154            TestExecutor::poll_until_stalled(&mut setup_done_task).await,
2155            Poll::Ready(Ok(_)),
2156            "Setup event not triggered after scheduling an alarm"
2157        );
2158
2159        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(100)).await;
2160        assert_matches!(TestExecutor::poll_until_stalled(set_task).await, Poll::Ready(Ok(Ok(_))));
2161    }
2162
2163    #[fuchsia::test(allow_stalls = false)]
2164    async fn test_basic_timed_wait_notify() {
2165        const ALARM_ID: &str = "Hello";
2166        let ctx = TestContext::new().await;
2167
2168        let (notifier_client, mut notifier_stream) =
2169            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2170        let setup_done = zx::Event::create();
2171        assert_matches!(
2172            ctx.wake_proxy
2173                .set(
2174                    notifier_client,
2175                    fidl::BootInstant::from_nanos(2),
2176                    fta::SetMode::NotifySetupDone(
2177                        setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2178                    ),
2179                    ALARM_ID,
2180                )
2181                .await,
2182            Ok(Ok(()))
2183        );
2184
2185        let mut done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2186        assert_matches!(
2187            TestExecutor::poll_until_stalled(&mut done_task).await,
2188            Poll::Ready(Ok(_)),
2189            "Setup event not triggered after scheduling an alarm"
2190        );
2191
2192        let mut next_task = notifier_stream.next();
2193        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2194
2195        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
2196        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2197
2198        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(2)).await;
2199        assert_matches!(
2200            TestExecutor::poll_until_stalled(next_task).await,
2201            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2202        );
2203    }
2204
2205    #[fuchsia::test(allow_stalls = false)]
2206    async fn test_two_alarms_same() {
2207        const DEADLINE_NANOS: i64 = 100;
2208
2209        let ctx = TestContext::new().await;
2210
2211        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2212            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2213            fta::SetMode::KeepAlive(fake_wake_lease()),
2214            "Hello1".into(),
2215        );
2216        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2217            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2218            fta::SetMode::KeepAlive(fake_wake_lease()),
2219            "Hello2".into(),
2220        );
2221
2222        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2223        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2224
2225        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2226
2227        assert_matches!(
2228            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2229            Poll::Ready(Ok(Ok(_)))
2230        );
2231        assert_matches!(
2232            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2233            Poll::Ready(Ok(Ok(_)))
2234        );
2235    }
2236
2237    #[fuchsia::test(allow_stalls = false)]
2238    async fn test_two_alarms_same_notify() {
2239        const DEADLINE_NANOS: i64 = 100;
2240        const ALARM_ID_1: &str = "Hello1";
2241        const ALARM_ID_2: &str = "Hello2";
2242
2243        let ctx = TestContext::new().await;
2244
2245        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2246            let (notifier_client, notifier_stream) =
2247                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2248            assert_matches!(
2249                ctx.wake_proxy
2250                    .set(
2251                        notifier_client,
2252                        fidl::BootInstant::from_nanos(deadline_nanos),
2253                        fta::SetMode::KeepAlive(fake_wake_lease()),
2254                        alarm_id,
2255                    )
2256                    .await,
2257                Ok(Ok(()))
2258            );
2259            notifier_stream
2260        };
2261
2262        let mut notifier_1 = schedule(DEADLINE_NANOS, ALARM_ID_1).await;
2263        let mut notifier_2 = schedule(DEADLINE_NANOS, ALARM_ID_2).await;
2264
2265        let mut next_task_1 = notifier_1.next();
2266        let mut next_task_2 = notifier_2.next();
2267
2268        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2269        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2270
2271        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2272
2273        assert_matches!(
2274            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2275            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_1
2276        );
2277        assert_matches!(
2278            TestExecutor::poll_until_stalled(&mut next_task_2).await,
2279            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_2
2280        );
2281
2282        assert_matches!(
2283            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2284            Poll::Ready(None)
2285        );
2286        assert_matches!(
2287            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2288            Poll::Ready(None)
2289        );
2290    }
2291
2292    #[test_case(100, 200 ; "push out")]
2293    #[test_case(200, 100 ; "pull in")]
2294    #[fuchsia::test(allow_stalls = false)]
2295    async fn test_two_alarms_different(
2296        // One timer scheduled at this instant (fake time starts from zero).
2297        first_deadline_nanos: i64,
2298        // Another timer scheduled at this instant.
2299        second_deadline_nanos: i64,
2300    ) {
2301        let ctx = TestContext::new().await;
2302
2303        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2304            fidl::BootInstant::from_nanos(first_deadline_nanos),
2305            fta::SetMode::KeepAlive(fake_wake_lease()),
2306            "Hello1".into(),
2307        );
2308        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2309            fidl::BootInstant::from_nanos(second_deadline_nanos),
2310            fta::SetMode::KeepAlive(fake_wake_lease()),
2311            "Hello2".into(),
2312        );
2313
2314        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2315        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2316
2317        // Sort alarms by their deadlines.
2318        let mut tasks = [(first_deadline_nanos, set_task_1), (second_deadline_nanos, set_task_2)];
2319        tasks.sort_by(|a, b| a.0.cmp(&b.0));
2320        let [mut first_task, mut second_task] = tasks;
2321
2322        // Alarms should fire in order of deadlines.
2323        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(first_task.0)).await;
2324        assert_matches!(
2325            TestExecutor::poll_until_stalled(&mut first_task.1).await,
2326            Poll::Ready(Ok(Ok(_)))
2327        );
2328        assert_matches!(TestExecutor::poll_until_stalled(&mut second_task.1).await, Poll::Pending);
2329
2330        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(second_task.0)).await;
2331        assert_matches!(
2332            TestExecutor::poll_until_stalled(&mut second_task.1).await,
2333            Poll::Ready(Ok(Ok(_)))
2334        );
2335    }
2336
2337    #[test_case(100, 200 ; "push out")]
2338    #[test_case(200, 100 ; "pull in")]
2339    #[fuchsia::test(allow_stalls = false)]
2340    async fn test_two_alarms_different_notify(
2341        // One timer scheduled at this instant (fake time starts from zero).
2342        first_deadline_nanos: i64,
2343        // Another timer scheduled at this instant.
2344        second_deadline_nanos: i64,
2345    ) {
2346        const ALARM_ID_1: &str = "Hello1";
2347        const ALARM_ID_2: &str = "Hello2";
2348
2349        let ctx = TestContext::new().await;
2350
2351        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2352            let (notifier_client, notifier_stream) =
2353                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2354            assert_matches!(
2355                ctx.wake_proxy
2356                    .set(
2357                        notifier_client,
2358                        fidl::BootInstant::from_nanos(deadline_nanos),
2359                        fta::SetMode::KeepAlive(fake_wake_lease()),
2360                        alarm_id,
2361                    )
2362                    .await,
2363                Ok(Ok(()))
2364            );
2365            notifier_stream
2366        };
2367
2368        // Sort alarms by their deadlines.
2369        let mut notifier_all = futures::stream::select_all([
2370            schedule(first_deadline_nanos, ALARM_ID_1).await,
2371            schedule(second_deadline_nanos, ALARM_ID_2).await,
2372        ]);
2373        let [(early_ns, early_alarm), (later_ns, later_alarm)] = {
2374            let mut tasks =
2375                [(first_deadline_nanos, ALARM_ID_1), (second_deadline_nanos, ALARM_ID_2)];
2376            tasks.sort_by(|a, b| a.0.cmp(&b.0));
2377            tasks
2378        };
2379
2380        // Alarms should fire in order of deadlines.
2381        let mut next_task = notifier_all.next();
2382        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2383
2384        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(early_ns)).await;
2385        assert_matches!(
2386            TestExecutor::poll_until_stalled(next_task).await,
2387            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == early_alarm
2388        );
2389
2390        let mut next_task = notifier_all.next();
2391        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2392
2393        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(later_ns)).await;
2394        assert_matches!(
2395            TestExecutor::poll_until_stalled(next_task).await,
2396            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == later_alarm
2397        );
2398        assert_matches!(
2399            TestExecutor::poll_until_stalled(notifier_all.next()).await,
2400            Poll::Ready(None)
2401        );
2402    }
2403
2404    #[fuchsia::test(allow_stalls = false)]
2405    async fn test_alarm_immediate() {
2406        let ctx = TestContext::new().await;
2407        let mut set_task = ctx.wake_proxy.set_and_wait(
2408            fidl::BootInstant::INFINITE_PAST,
2409            fta::SetMode::KeepAlive(fake_wake_lease()),
2410            "Hello1".into(),
2411        );
2412        assert_matches!(
2413            TestExecutor::poll_until_stalled(&mut set_task).await,
2414            Poll::Ready(Ok(Ok(_)))
2415        );
2416    }
2417
2418    #[fuchsia::test(allow_stalls = false)]
2419    async fn test_alarm_immediate_notify() {
2420        const ALARM_ID: &str = "Hello";
2421        let ctx = TestContext::new().await;
2422
2423        let (notifier_client, mut notifier_stream) =
2424            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2425
2426        let mut set_task = ctx.wake_proxy.set(
2427            notifier_client,
2428            fidl::BootInstant::INFINITE_PAST,
2429            fta::SetMode::KeepAlive(fake_wake_lease()),
2430            ALARM_ID,
2431        );
2432        assert_matches!(
2433            TestExecutor::poll_until_stalled(&mut set_task).await,
2434            Poll::Ready(Ok(Ok(_)))
2435        );
2436        assert_matches!(
2437            TestExecutor::poll_until_stalled(notifier_stream.next()).await,
2438            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2439        );
2440    }
2441
2442    // Rescheduling a timer will cancel the earlier call and use the new
2443    // deadline for the later call.
2444    #[test_case(200, 100 ; "pull in")]
2445    #[test_case(100, 200 ; "push out")]
2446    #[test_case(100, 100 ; "replace with the same deadline")]
2447    #[fuchsia::test(allow_stalls = false)]
2448    async fn test_reschedule(initial_deadline_nanos: i64, override_deadline_nanos: i64) {
2449        const ALARM_ID: &str = "Hello";
2450
2451        let ctx = TestContext::new().await;
2452
2453        let schedule = |deadline_nanos: i64| {
2454            let setup_done = zx::Event::create();
2455            let task = ctx.wake_proxy.set_and_wait(
2456                fidl::BootInstant::from_nanos(deadline_nanos),
2457                fta::SetMode::NotifySetupDone(
2458                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2459                ),
2460                ALARM_ID.into(),
2461            );
2462            (task, setup_done)
2463        };
2464
2465        // Schedule a timer with the initial deadline first. Let it wait, then
2466        // try to reschedule the same timer.
2467        let (mut set_task_1, setup_done_1) = schedule(initial_deadline_nanos);
2468        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
2469        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2470
2471        // Schedule the same timer as above, but with the override deadline.
2472        // This should cancel the earlier call.
2473        let (mut set_task_2, setup_done_2) = schedule(override_deadline_nanos);
2474        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
2475        assert_matches!(
2476            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2477            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
2478        );
2479        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2480
2481        // The later call fires exactly at the override deadline.
2482        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos - 1))
2483            .await;
2484        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2485        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos))
2486            .await;
2487        assert_matches!(
2488            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2489            Poll::Ready(Ok(Ok(_)))
2490        );
2491
2492        // The values in the inspector tree are fixed because the test
2493        // runs fully deterministically in fake time.
2494        assert_data_tree!(finspect::component::inspector(), root: {
2495            test: {
2496                hardware: {
2497                    // All alarms fired, so this should be "none".
2498                    current_deadline: "(none)",
2499                    remaining_until_alarm: "(none)",
2500                },
2501                now_formatted: format!("{override_deadline_nanos}ns ({override_deadline_nanos})"),
2502                now_ns: override_deadline_nanos,
2503                pending_timers: "Boot:\n\t\n\tUTC:\n\t",
2504                pending_timers_count: 0u64,
2505                requested_deadlines_ns: AnyProperty,
2506                schedule_delay_ns: AnyProperty,
2507                slack_ns: AnyProperty,
2508                boot_deadlines_count: AnyProperty,
2509                utc_deadlines_count: AnyProperty,
2510                debug_node: contains {},
2511            },
2512        });
2513    }
2514
2515    // Rescheduling a timer will send an error on the old notifier and use the
2516    // new notifier for the new deadline.
2517    #[fuchsia::test(allow_stalls = false)]
2518    async fn test_reschedule_notify() {
2519        const ALARM_ID: &str = "Hello";
2520        const INITIAL_DEADLINE_NANOS: i64 = 100;
2521        const OVERRIDE_DEADLINE_NANOS: i64 = 200;
2522
2523        let ctx = TestContext::new().await;
2524
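        // Helper: schedules the alarm with a fresh Notifier and returns the
        // notifier's request stream so the test can observe its callbacks.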
2525        let schedule = async |deadline_nanos: i64| {
2526            let (notifier_client, notifier_stream) =
2527                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2528            assert_matches!(
2529                ctx.wake_proxy
2530                    .set(
2531                        notifier_client,
2532                        fidl::BootInstant::from_nanos(deadline_nanos),
2533                        fta::SetMode::KeepAlive(fake_wake_lease()),
2534                        ALARM_ID.into(),
2535                    )
2536                    .await,
2537                Ok(Ok(()))
2538            );
2539            notifier_stream
2540        };
2541
2542        let mut notifier_1 = schedule(INITIAL_DEADLINE_NANOS).await;
2543        let mut next_task_1 = notifier_1.next();
2544        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2545
2546        let mut notifier_2 = schedule(OVERRIDE_DEADLINE_NANOS).await;
2547        let mut next_task_2 = notifier_2.next();
2548        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2549
2550        // The first notifier receives an error notification, then its channel is closed.
2551        assert_matches!(
2552            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2553            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
2554        );
2555        assert_matches!(
2556            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2557            Poll::Ready(None)
2558        );
2559
2560        // The second notifier is notified at the new deadline, then its channel is closed.
2561        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(OVERRIDE_DEADLINE_NANOS))
2562            .await;
2563        assert_matches!(
2564            TestExecutor::poll_until_stalled(next_task_2).await,
2565            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2566        );
2567        assert_matches!(
2568            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2569            Poll::Ready(None)
2570        );
2571    }
2572
2573    // If we get two scheduling FIDL errors one after another, the wake alarm
2574    // manager must not lock up.
2575    #[fuchsia::test(allow_stalls = false)]
2576    async fn test_fidl_error_on_reschedule() {
2577        const DEADLINE_NANOS: i64 = 100;
2578
2579        let (wake_proxy, _stream) =
2580            fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2581        drop(_stream);
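        // Dropping the server end closes the channel, so each call on the proxy
        // below fails with `ClientChannelClosed`.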
2582
2583        assert_matches!(
2584            wake_proxy
2585                .set_and_wait(
2586                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2587                    fta::SetMode::KeepAlive(fake_wake_lease()),
2588                    "hello1".into(),
2589                )
2590                .await,
2591            Err(fidl::Error::ClientChannelClosed { .. })
2592        );
2593
2594        assert_matches!(
2595            wake_proxy
2596                .set_and_wait(
2597                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2598                    fta::SetMode::KeepAlive(fake_wake_lease()),
2599                    "hello2".into(),
2600                )
2601                .await,
2602            Err(fidl::Error::ClientChannelClosed { .. })
2603        );
2604    }
2605
2606    // Verify that if a UTC timer is scheduled in the future on the UTC timeline, and the
2607    // UTC clock is then moved so that "now" jumps past the timer's deadline, the timer fires.
2608    #[fuchsia::test(allow_stalls = false)]
2609    async fn test_set_and_wait_utc() {
2610        const ALARM_ID: &str = "Hello_set_and_wait_utc";
2611        let ctx = TestContext::new().await;
2612
2613        let now_boot = fasync::BootInstant::now();
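        // Set the UTC clock so that, at boot instant `now_boot`, UTC reads exactly
        // the backstop time.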
2614        ctx.utc_clock
2615            .update(
2616                zx::ClockUpdate::builder()
2617                    .absolute_value(now_boot.into(), ctx.utc_backstop)
2618                    .build(),
2619            )
2620            .unwrap();
2621
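        // Pick a deadline just past the backstop, i.e. slightly ahead of what the
        // UTC clock currently reads, so the alarm starts out in the future.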
2622        let timestamp_utc = ctx.utc_backstop + fxr::UtcDuration::from_nanos(2);
2623        let mut wake_fut = ctx.wake_proxy.set_and_wait_utc(
2624            &fta::InstantUtc { timestamp_utc: timestamp_utc.into_nanos() },
2625            fta::SetMode::KeepAlive(fake_wake_lease()),
2626            ALARM_ID,
2627        );
2628
2629        // Timer is not expired yet.
2630        assert_matches!(TestExecutor::poll_until_stalled(&mut wake_fut).await, Poll::Pending);
2631
2632        // Move the UTC timeline forward so that "now" in UTC is past the alarm deadline.
2633        ctx.utc_clock
2634            .update(
2635                zx::ClockUpdate::builder()
2636                    .absolute_value(
2637                        now_boot.into(),
2638                        ctx.utc_backstop + fxr::UtcDuration::from_nanos(100),
2639                    )
2640                    .build(),
2641            )
2642            .unwrap();
2643
2644        // Nudge fake time forward so the pending alarm can fire; see similar code in the test above.
2645        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
2646        assert_matches!(TestExecutor::poll_until_stalled(wake_fut).await, Poll::Ready(_));
2647    }
2648}