alarms/
lib.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Alarm management subsystem.
6//!
7//! This subsystem serves the FIDL API `fuchsia.time.alarms/WakeAlarms`. To instantiate,
8//! you can use the following approach:
9//!
10//! ```ignore
11//! let proxy = client::connect_to_protocol::<ffhh::DeviceMarker>().map_err(
12//!    |e| error!("error: {}", e)).expect("add proper error handling");
13//! let timer_loop = Rc::new(alarms::Loop::new(proxy, inspect_node)); // inspect_node: a fuchsia_inspect Node
14//! ```
15//!
16//! From here, use the standard approach with [ServiceFs::new] to expose the
17//! discoverable FIDL endpoint and call:
18//!
19//! ```ignore
20//! let stream: fidl_fuchsia_time_alarms::WakeAlarmsRequestStream = ... ;
21//! alarms::serve(timer_loop, stream).await;
22//! // ...
23//! ```
24//!
25//! Of course, for everything to work well, your component will need appropriate
26//! capability routing.  Refer to capability routing docs for those details.
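//!
//! A fuller sketch of the serving loop (illustrative only; the service enum and
//! the exact `ServiceFs` wiring are assumptions about the embedding component,
//! not APIs provided by this crate):
//!
//! ```ignore
//! enum IncomingService {
//!     WakeAlarms(fidl_fuchsia_time_alarms::WakeAlarmsRequestStream),
//! }
//!
//! let mut fs = fuchsia_component::server::ServiceFs::new();
//! fs.dir("svc").add_fidl_service(IncomingService::WakeAlarms);
//! fs.take_and_serve_directory_handle().expect("serving outgoing directory");
//! fs.for_each_concurrent(None, |IncomingService::WakeAlarms(stream)| {
//!     alarms::serve(timer_loop.clone(), stream)
//! })
//! .await;
//! ```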
27
28mod emu;
29
30use crate::emu::EmulationTimerOps;
31use anyhow::{anyhow, Context, Result};
32use async_trait::async_trait;
33use fidl::encoding::ProxyChannelBox;
34use fidl::endpoints::RequestStream;
35use fidl::HandleBased;
36use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
37use fuchsia_inspect::{HistogramProperty, Property};
38use futures::channel::mpsc;
39use futures::sink::SinkExt;
40use futures::{StreamExt, TryStreamExt};
41use log::{debug, error, warn};
42use scopeguard::defer;
43use std::cell::RefCell;
44use std::cmp;
45use std::collections::{BTreeMap, BinaryHeap, HashMap};
46use std::rc::Rc;
47use std::sync::LazyLock;
48use time_pretty::{format_duration, format_timer, MSEC_IN_NANOS};
49use zx::AsHandleRef;
50use {
51    fidl_fuchsia_hardware_hrtimer as ffhh, fidl_fuchsia_time_alarms as fta,
52    fuchsia_async as fasync, fuchsia_inspect as finspect, fuchsia_trace as trace,
53};
54
55static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
56static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));
57
58/// The largest value of timer "ticks" that is still considered useful.
59static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);
60
61/// The hrtimer ID used for scheduling wake alarms.  This ID is reused from
62/// Starnix, and should eventually no longer be critical.
63const MAIN_TIMER_ID: usize = 6;
64
65/// TODO(b/383062441): remove this special casing once Starnix hrtimer is fully
66/// migrated to multiplexed timer.
67/// A special-cased Starnix timer ID, used to allow cross-connection setup
68/// for Starnix only.
69const TEMPORARY_STARNIX_TIMER_ID: &str = "starnix-hrtimer";
70static TEMPORARY_STARNIX_CID: LazyLock<zx::Event> = LazyLock::new(|| zx::Event::create());
71
72/// This is what we consider a "long" delay in alarm operations.
73const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;
74
75/// Compares two optional deadlines and returns true if `before` is different from `after`.
76/// Two `None`s compare as equal.
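///
/// Illustrative behavior (a sketch, not an exhaustive contract):
///
/// ```ignore
/// let t1 = Some(fasync::BootInstant::from_nanos(1_000));
/// let t2 = Some(fasync::BootInstant::from_nanos(2_000));
/// assert!(!is_deadline_changed(None, None));
/// assert!(is_deadline_changed(None, t1));
/// assert!(is_deadline_changed(t1, t2));
/// assert!(!is_deadline_changed(t1, t1));
/// ```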
77fn is_deadline_changed(
78    before: Option<fasync::BootInstant>,
79    after: Option<fasync::BootInstant>,
80) -> bool {
81    match (before, after) {
82        (None, None) => false,
83        (None, Some(_)) | (Some(_), None) => true,
84        (Some(before), Some(after)) => before != after,
85    }
86}
87
88/// Errors returnable from [TimerOps] calls.
89#[derive(Debug)]
90pub(crate) enum TimerOpsError {
91    /// The driver reported an error.
92    Driver(ffhh::DriverError),
93    /// FIDL-specific RPC error.
94    Fidl(fidl::Error),
95}
96
97trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {
98    // nop
99}
100
101/// Abstracts away timer operations.
102#[async_trait(?Send)]
103pub(crate) trait TimerOps {
104    /// Stop the timer with the specified ID.
105    async fn stop(&self, id: u64);
106
107    /// Examine the timer's properties, such as supported resolutions and tick
108    /// counts.
109    async fn get_timer_properties(&self) -> TimerConfig;
110
111    /// This method must return a concrete future rather than being declared
112    /// `async`: an `async fn` would keep `self` borrowed for as long as the
113    /// returned future lives, which prevents moving that future into a
114    /// separate closure or task.
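    ///
    /// Illustrative use (a sketch; `timer_ops`, `resolution`, `ticks`, and
    /// `setup_event` are placeholders): because the returned future does not
    /// borrow `self`, it can be moved into a separately spawned task while the
    /// caller keeps using the same [TimerOps] implementation:
    ///
    /// ```ignore
    /// let wait_fut = timer_ops.start_and_wait(id, &resolution, ticks, setup_event);
    /// let task = fasync::Task::local(async move {
    ///     match wait_fut.await {
    ///         Ok(keep_alive) => { /* forward the wake lease */ }
    ///         Err(e) => { /* surface the TimerOpsError */ }
    ///     }
    /// });
    /// // `timer_ops` remains usable here, e.g. to call `timer_ops.stop(id).await`.
    /// ```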
115    fn start_and_wait(
116        &self,
117        id: u64,
118        resolution: &ffhh::Resolution,
119        ticks: u64,
120        setup_event: zx::Event,
121    ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
122}
123
124/// TimerOps backed by an actual hardware timer.
125struct HardwareTimerOps {
126    proxy: ffhh::DeviceProxy,
127}
128
129impl HardwareTimerOps {
130    fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
131        Box::new(Self { proxy })
132    }
133}
134
135#[async_trait(?Send)]
136impl TimerOps for HardwareTimerOps {
137    async fn stop(&self, id: u64) {
138        let _ = self
139            .proxy
140            .stop(id)
141            .await
142            .map(|result| {
143                let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
144            })
145            .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
146    }
147
148    async fn get_timer_properties(&self) -> TimerConfig {
149        match self.proxy.get_properties().await {
150            Ok(p) => {
151                let timers_properties = &p.timers_properties.expect("timers_properties must exist");
152                debug!("get_timer_properties: got: {:?}", timers_properties);
153
154                // Pick the correct hrtimer to use for wakes.
155                let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
156                    // Mostly vim3, where we have pre-existing timer allocations
157                    // that we don't need to change.
158                    MAIN_TIMER_ID
159                } else if timers_properties.len() > 0 {
160                    // Newer devices that don't need to allocate timer IDs, and/or
161                    // may not even have as many timers as vim3 does. But, at least
162                    // one timer is needed.
163                    0
164                } else {
165                    // Give up.
166                    return TimerConfig::new_empty();
167                };
168                let main_timer_properties = &timers_properties[timer_index];
169                debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
170                // Not sure whether it is useful to have more ticks than this, so limit it.
171                let max_ticks: u64 = std::cmp::min(
172                    main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
173                    *MAX_USEFUL_TICKS,
174                );
175                let resolutions = &main_timer_properties
176                    .supported_resolutions
177                    .as_ref()
178                    .expect("supported_resolutions is populated")
179                    .iter()
180                    .last() //  Limits the resolution to the coarsest available.
181                    .map(|r| match *r {
182                        ffhh::Resolution::Duration(d) => d,
183                        _ => {
184                            error!(
185                            "get_timer_properties: Unknown resolution type, returning millisecond."
186                        );
187                            MSEC_IN_NANOS
188                        }
189                    })
190                    .map(|d| zx::BootDuration::from_nanos(d))
191                    .into_iter() // Used with .last() above.
192                    .collect::<Vec<_>>();
193                let timer_id = main_timer_properties.id.expect("timer ID is always present");
194                TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
195            }
196            Err(e) => {
197                error!("could not get timer properties: {:?}", e);
198                TimerConfig::new_empty()
199            }
200        }
201    }
202
203    fn start_and_wait(
204        &self,
205        id: u64,
206        resolution: &ffhh::Resolution,
207        ticks: u64,
208        setup_event: zx::Event,
209    ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
210        let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
211        Box::pin(HwResponseFut { pinner: Box::pin(inner) })
212    }
213}
214
215// Untangles the borrow checker issues that otherwise result from making
216// TimerOps::start_and_wait an async function.
217struct HwResponseFut {
218    pinner: std::pin::Pin<
219        Box<
220            fidl::client::QueryResponseFut<
221                ffhh::DeviceStartAndWaitResult,
222                fidl::encoding::DefaultFuchsiaResourceDialect,
223            >,
224        >,
225    >,
226}
227
228use std::task::Poll;
229impl SawResponseFut for HwResponseFut {}
230impl std::future::Future for HwResponseFut {
231    type Output = Result<zx::EventPair, TimerOpsError>;
232    fn poll(
233        mut self: std::pin::Pin<&mut Self>,
234        cx: &mut std::task::Context<'_>,
235    ) -> std::task::Poll<Self::Output> {
236        let inner_poll = self.pinner.as_mut().poll(cx);
237        match inner_poll {
238            Poll::Ready(result) => Poll::Ready(match result {
239                Ok(Ok(keep_alive)) => Ok(keep_alive),
240                Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
241                Err(e) => Err(TimerOpsError::Fidl(e)),
242            }),
243            Poll::Pending => Poll::Pending,
244        }
245    }
246}
247
248/// Stops a currently running hardware timer.
249async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
250    trace::duration!(c"alarms", c"hrtimer:stop", "id" => timer_config.id);
251    debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
252    hrtimer.stop(timer_config.id).await;
253    debug!("stop_hrtimer: stopped  hardware timer: {}", timer_config.id);
254}
255
256// The default size of the channels created in this module.
257const CHANNEL_SIZE: usize = 100;
258
259/// A type handed around between the concurrent loops run by this module.
260#[derive(Debug)]
261enum Cmd {
262    /// Request a timer to be started.
263    Start {
264        /// The unique connection ID.
265        cid: zx::Koid,
266        /// A timestamp (presumably in the future), at which to expire the timer.
267        deadline: fasync::BootInstant,
268        /// The API supports several modes. See fuchsia.time.alarms/Wake.fidl.
269        mode: fta::SetAndWaitMode,
270        /// An alarm identifier, chosen by the caller.
271        alarm_id: String,
272        /// A responder that will be called when the timer expires. The
273        /// client end of the connection will block until we send something
274        /// on this responder.
275        ///
276        /// This is packaged into a Rc... only because both the "happy path"
277        /// and the error path must consume the responder.  This allows them
278        /// to be consumed, without the responder needing to implement Default.
279        responder: Rc<RefCell<Option<fta::WakeAlarmsSetAndWaitResponder>>>,
280    },
281    StopById {
282        done: zx::Event,
283        timer_id: TimerId,
284    },
285    Alarm {
286        expired_deadline: fasync::BootInstant,
287        keep_alive: fidl::EventPair,
288    },
289    AlarmFidlError {
290        expired_deadline: fasync::BootInstant,
291        error: fidl::Error,
292    },
293    AlarmDriverError {
294        expired_deadline: fasync::BootInstant,
295        error: ffhh::DriverError,
296    },
297}
298
299impl std::fmt::Display for Cmd {
300    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
301        match self {
302            Cmd::Start { cid, deadline, alarm_id, .. } => {
303                write!(
304                    f,
305                    "Start[alarm_id=\"{}\", cid={:?}, deadline={}]",
306                    alarm_id,
307                    cid,
308                    format_timer((*deadline).into())
309                )
310            }
311            Cmd::Alarm { expired_deadline, .. } => {
312                write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
313            }
314            Cmd::AlarmFidlError { expired_deadline, error } => {
315                write!(
316                    f,
317                    "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
318                    format_timer((*expired_deadline).into()),
319                    error
320                )
321            }
322            Cmd::AlarmDriverError { expired_deadline, error } => {
323                write!(
324                    f,
325                    "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
326                    format_timer((*expired_deadline).into()),
327                    error
328                )
329            }
330            Cmd::StopById { timer_id, done: _ } => {
331                write!(f, "StopById[timerId={}]", timer_id,)
332            }
333        }
334    }
335}
336
337/// Extracts a KOID from the underlying channel of the provided [stream].
338///
339/// # Returns
340/// - zx::Koid: the KOID you wanted.
341/// - fta::WakeAlarmsRequestStream: the stream; we had to deconstruct it briefly,
342///   so this gives it back to you.
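///
/// # Example (illustrative)
///
/// ```ignore
/// let (cid, requests) = get_stream_koid(requests);
/// debug!("serving connection: {:?}", cid);
/// ```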
343pub fn get_stream_koid(
344    stream: fta::WakeAlarmsRequestStream,
345) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
346    let (inner, is_terminated) = stream.into_inner();
347    let koid = inner.channel().as_channel().get_koid().expect("infallible");
348    let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
349    (koid, stream)
350}
351
352/// Serves a single Wake API client.
353pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
354    // Use the KOID of the request stream's channel as the connection ID (cid).
355    fasync::Task::local(async move {
356        let timer_loop = timer_loop.clone();
357        let timer_loop_send = || timer_loop.get_sender();
358        let (cid, mut requests) = get_stream_koid(requests);
359        let mut request_count = 0;
360        debug!("alarms::serve: opened connection: {:?}", cid);
361        while let Some(maybe_request) = requests.next().await {
362            request_count += 1;
363            debug!("alarms::serve: cid: {:?} incoming request: {}", cid, request_count);
364            match maybe_request {
365                Ok(request) => {
366                    // Should return quickly.
367                    handle_request(cid, timer_loop_send(), request).await;
368                }
369                Err(e) => {
370                    warn!("alarms::serve: error in request: {:?}", e);
371                }
372            }
373            debug!("alarms::serve: cid: {:?} done request: {}", cid, request_count);
374        }
375        // Check if connection closure was intentional. It is way too easy to close
376        // a FIDL connection inadvertently if doing non-mainstream things with FIDL.
377        warn!("alarms::serve: CLOSED CONNECTION: cid: {:?}", cid);
378    })
379    .detach();
380}
381
382// Inject a constant KOID as the connection ID (cid) if the alarm ID corresponds to the
383// special-cased Starnix alarm.
384// TODO(b/383062441): remove this special casing.
385fn compute_cid(cid: zx::Koid, alarm_id: &str) -> zx::Koid {
386    if alarm_id == TEMPORARY_STARNIX_TIMER_ID {
387        // Temporarily, the Starnix timer is a singleton and always gets the
388        // same CID.
389        TEMPORARY_STARNIX_CID.as_handle_ref().get_koid().expect("infallible")
390    } else {
391        cid
392    }
393}
394
395async fn handle_cancel(alarm_id: String, cid: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
396    let done = zx::Event::create();
397    let cid = compute_cid(cid, &alarm_id);
398    let timer_id = TimerId { alarm_id: alarm_id.clone(), cid };
399    if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
400        warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
401    }
402    wait_signaled(&done).await;
403}
404
405/// Processes a single Wake API request from a single client.
406/// This function is expected to return quickly.
407///
408/// # Args
409/// - `cid`: the unique identifier of the connection producing these requests.
410/// - `cmd`: the outbound queue of commands to deliver to the timer manager.
411/// - `request`: a single inbound Wake FIDL API request.
412async fn handle_request(
413    cid: zx::Koid,
414    mut cmd: mpsc::Sender<Cmd>,
415    request: fta::WakeAlarmsRequest,
416) {
417    match request {
418        fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
419            // Since responder is consumed by the happy path and the error path, but not both,
420            // and because the responder does not implement Default, this is a way to
421            // send it in two mutually exclusive directions.  Each direction will reverse
422            // this wrapping once the responder makes it to the other side.
423            //
424            // Rc required because of sharing a noncopyable struct; RefCell required because
425            // borrow_mut() is needed to move out; and Option is required so we can
426            // use take() to replace the struct with None so it does not need to leave
427            // a Default in its place.
428            let responder = Rc::new(RefCell::new(Some(responder)));
429            let cid = compute_cid(cid, &alarm_id);
430
431            // Alarm is not scheduled yet!
432            debug!(
433                "handle_request: scheduling alarm_id: \"{}\"\n\tcid: {:?}\n\tdeadline: {}",
434                alarm_id,
435                cid,
436                format_timer(deadline.into())
437            );
438            // Expected to return quickly.
439            if let Err(e) = cmd
440                .send(Cmd::Start {
441                    cid,
442                    deadline: deadline.into(),
443                    mode,
444                    alarm_id: alarm_id.clone(),
445                    responder: responder.clone(),
446                })
447                .await
448            {
449                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
450                responder
451                    .borrow_mut()
452                    .take()
453                    .expect("always present if call fails")
454                    .send(Err(fta::WakeAlarmsError::Internal))
455                    .unwrap();
456            }
457        }
458        fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
459            // TODO: b/383062441 - make this into an async task so that we wait
460            // less to schedule the next alarm.
461            handle_cancel(alarm_id, cid, &mut cmd).await;
462        }
463        fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
464    };
465}
466
467/// Represents a single alarm event processing loop.
468///
469/// One instance is created per each alarm-capable low-level device.
470pub struct Loop {
471    // The task executing the alarm event processing [Loop].
472    _task: fasync::Task<()>,
473    // Given to any clients that need to send messages to `_task`
474    // via [get_sender].
475    snd_cloneable: mpsc::Sender<Cmd>,
476}
477
478impl Loop {
479    /// Creates a new instance of [Loop].
480    ///
481    /// `device_proxy` is a connection to a low-level timer device.
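    ///
    /// An illustrative call (a sketch; `inspector` is assumed to be a
    /// [finspect::Inspector] owned by the component):
    ///
    /// ```ignore
    /// let timer_loop = Loop::new(proxy, inspector.root().create_child("alarms"));
    /// ```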
482    pub fn new(device_proxy: ffhh::DeviceProxy, inspect: finspect::Node) -> Self {
483        let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
484        Loop::new_internal(hw_device_timer_ops, inspect)
485    }
486
487    /// Creates a new instance of [Loop] with emulated wake alarms.
488    pub fn new_emulated(inspect: finspect::Node) -> Self {
489        let timer_ops = Box::new(EmulationTimerOps::new());
490        Loop::new_internal(timer_ops, inspect)
491    }
492
493    fn new_internal(timer_ops: Box<dyn TimerOps>, inspect: finspect::Node) -> Self {
494        let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
495        let snd_clone = snd.clone();
496        let _task = fasync::Task::local(async move {
497            wake_timer_loop(snd_clone, rcv, timer_ops, inspect).await
498        });
499        Self { _task, snd_cloneable: snd }
500    }
501
502    /// Gets a copy of a channel through which async commands may be sent to
503    /// the [Loop].
504    fn get_sender(&self) -> mpsc::Sender<Cmd> {
505        self.snd_cloneable.clone()
506    }
507}
508
509/// A representation of the state of a single Timer.
510#[derive(Debug)]
511struct TimerNode {
512    /// The deadline at which the timer expires.
513    deadline: fasync::BootInstant,
514    /// The unique alarm ID associated with this timer.
515    alarm_id: String,
516    /// The unique connection ID that this timer belongs to.  Multiple timers
517    /// may share the same `cid`.
518    cid: zx::Koid,
519    /// The responder that is blocked until the timer expires.  Used to notify
520    /// the alarms subsystem client when this alarm expires.
521    responder: Option<fta::WakeAlarmsSetAndWaitResponder>,
522}
523
524impl TimerNode {
525    fn new(
526        deadline: fasync::BootInstant,
527        alarm_id: String,
528        cid: zx::Koid,
529        responder: fta::WakeAlarmsSetAndWaitResponder,
530    ) -> Self {
531        Self { deadline, alarm_id, cid, responder: Some(responder) }
532    }
533
534    fn get_alarm_id(&self) -> &str {
535        &self.alarm_id[..]
536    }
537
538    fn get_cid(&self) -> &zx::Koid {
539        &self.cid
540    }
541
542    fn get_id(&self) -> TimerId {
543        TimerId { alarm_id: self.alarm_id.clone(), cid: self.cid.clone() }
544    }
545
546    fn get_deadline(&self) -> &fasync::BootInstant {
547        &self.deadline
548    }
549
550    fn take_responder(&mut self) -> Option<fta::WakeAlarmsSetAndWaitResponder> {
551        self.responder.take()
552    }
553}
554
555impl Drop for TimerNode {
556    // If the TimerNode was evicted without having expired, notify the other
557    // end that the timer has been canceled.
558    fn drop(&mut self) {
559        let responder = self.take_responder();
560        responder.map(|r| {
561            // If the TimerNode is dropped, notify the client that may have
562            // been waiting. We can not drop a responder, because that kills
563            // the FIDL connection.
564            r.send(Err(fta::WakeAlarmsError::Dropped))
565                .map_err(|e| error!("could not drop responder: {:?}", e))
566        });
567    }
568}
569
570/// This and the other comparison trait implementations are needed to establish
571/// a total ordering of TimerNodes.
572impl std::cmp::Eq for TimerNode {}
573
574impl std::cmp::PartialEq for TimerNode {
575    fn eq(&self, other: &Self) -> bool {
576        self.deadline == other.deadline && self.alarm_id == other.alarm_id && self.cid == other.cid
577    }
578}
579
580impl std::cmp::PartialOrd for TimerNode {
581    /// Order by deadline first, but timers with same deadline are ordered
582    /// by respective IDs to avoid ordering nondeterminism.
583    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
584        Some(self.cmp(other))
585    }
586}
587
588impl Ord for TimerNode {
589    /// Compares two [TimerNode]s, by "which is sooner".
590    ///
591    /// Ties are broken by alarm ID, then by connection ID.
592    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
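        // Note: the deadline comparison is deliberately reversed so that the
        // std max-heap used by [Timers] yields the earliest deadline first.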
593        let ordering = other.deadline.cmp(&self.deadline);
594        if ordering == std::cmp::Ordering::Equal {
595            let ordering = self.alarm_id.cmp(&other.alarm_id);
596            if ordering == std::cmp::Ordering::Equal {
597                self.cid.cmp(&other.cid)
598            } else {
599                ordering
600            }
601        } else {
602            ordering
603        }
604    }
605}
606
607/// A full timer identifier.
608#[derive(Debug, PartialEq, Eq, Hash)]
609struct TimerId {
610    /// Connection-unique alarm ID.
611    alarm_id: String,
612    /// Connection identifier, unique per each client connection.
613    cid: zx::Koid,
614}
615
616impl std::fmt::Display for TimerId {
617    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
618        write!(f, "TimerId[alarm_id:{},cid:{:?}]", self.alarm_id, self.cid)
619    }
620}
621
622/// Contains all the timers known by the alarms subsystem.
623///
624/// [Timers] can efficiently find a timer with the earliest deadline,
625/// and given a cutoff can expire one timer for which the deadline has
626/// passed.
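///
/// Illustrative use (a sketch; `deadline`, `new_deadline`, `cid`, and the
/// responders are placeholders): re-pushing a node with the same [TimerId] but
/// a different deadline replaces the earlier entry, and expired nodes are
/// drained one at a time by repeated calls with the same `now`.
///
/// ```ignore
/// let mut timers = Timers::new();
/// timers.push(TimerNode::new(deadline, "alarm-1".to_string(), cid, responder));
/// timers.push(TimerNode::new(new_deadline, "alarm-1".to_string(), cid, responder2));
/// assert_eq!(timers.timer_count(), 1);
/// while let Some(mut expired) = timers.maybe_expire_earliest(fasync::BootInstant::now()) {
///     // Notify the waiting client via expired.take_responder() here.
/// }
/// ```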
627struct Timers {
628    timers: BinaryHeap<TimerNode>,
629    deadline_by_id: HashMap<TimerId, fasync::BootInstant>,
630}
631
632impl std::fmt::Display for Timers {
633    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
634        let now = fasync::BootInstant::now();
635        let sorted = self
636            .timers
637            .iter()
638            .map(|n| (n.deadline, n.alarm_id.clone()))
639            .collect::<BTreeMap<_, _>>()
640            .into_iter()
641            .map(|(k, v)| {
642                let remaining = k - now;
643                format!(
644                    "Timeout: {} => timer_id: {}, remaining: {}",
645                    format_timer(k.into()),
646                    v,
647                    format_duration(remaining.into())
648                )
649            })
650            .collect::<Vec<_>>();
651        let joined = sorted.join("\n\t");
652        write!(f, "\n\t{}", joined)
653    }
654}
655
656impl Timers {
657    /// Creates an empty [Timers].
658    fn new() -> Self {
659        Self { timers: BinaryHeap::new(), deadline_by_id: HashMap::new() }
660    }
661
662    /// Adds a [TimerNode] to [Timers].
663    ///
664    /// If the inserted node is identical to an already existing node, then
665    /// nothing is changed.  If the deadline is different, then the timer node
666    /// is replaced.
667    fn push(&mut self, n: TimerNode) {
668        let new_id = n.get_id();
669        if let Some(deadline) = self.deadline_by_id.get(&new_id) {
670            // There already is a deadline for this timer.
671            if n.deadline == *deadline {
672                return;
673            }
674            // Else replace. The deadline may be pushed out or pulled in.
675            self.deadline_by_id.insert(new_id, n.deadline.clone());
676            self.timers.retain(|t| t.get_id() != n.get_id());
677            self.timers.push(n);
678        } else {
679            // New timer node.
680            self.deadline_by_id.insert(new_id, n.deadline);
681            self.timers.push(n);
682        }
683    }
684
685    /// Returns a reference to the stored timer with the earliest deadline.
686    fn peek(&self) -> Option<&TimerNode> {
687        self.timers.peek()
688    }
689
690    /// Returns the deadline of the proximate timer in [Timers].
691    fn peek_deadline(&self) -> Option<fasync::BootInstant> {
692        self.peek().map(|t| t.deadline)
693    }
694
695    fn peek_id(&self) -> Option<TimerId> {
696        self.peek().map(|t| TimerId { alarm_id: t.alarm_id.clone(), cid: t.cid })
697    }
698
699    /// Args:
700    /// - `now` is the current time.
701    /// - `deadline` is the timer deadline to check for expiry.
702    fn expired(now: fasync::BootInstant, deadline: fasync::BootInstant) -> bool {
703        deadline <= now
704    }
705
706    /// Returns true if there are no known timers.
707    fn is_empty(&self) -> bool {
708        let empty1 = self.timers.is_empty();
709        let empty2 = self.deadline_by_id.is_empty();
710        assert!(empty1 == empty2, "broken invariant: empty1: {} empty2:{}", empty1, empty2);
711        empty1
712    }
713
714    /// Attempts to expire the earliest timer.
715    ///
716    /// If a timer is expired, it is removed from [Timers] and returned to the caller. Note that
717    /// there may be more timers that need expiring at the provided `now`. To drain
718    /// [Timers] of all expired timers, one must repeat the call to this method with the same
719    /// value of `now` until it returns `None`.
720    ///
721    /// Args:
722    /// - `now`: the time instant to compare the stored timers against.  Timers for
723    ///   which the deadline has been reached or surpassed are eligible for expiry.
724    fn maybe_expire_earliest(&mut self, now: fasync::BootInstant) -> Option<TimerNode> {
725        self.peek_deadline()
726            .map(|d| {
727                if Timers::expired(now, d) {
728                    self.timers.pop().map(|e| {
729                        self.deadline_by_id.remove(&e.get_id());
730                        e
731                    })
732                } else {
733                    None
734                }
735            })
736            .flatten()
737    }
738
739    /// Removes an alarm by ID.  If the earliest alarm is the alarm to be removed,
740    /// it is returned.
741    fn remove_by_id(&mut self, timer_id: &TimerId) -> Option<TimerNode> {
742        let ret = if let Some(t) = self.peek_id() {
743            if t == *timer_id {
744                self.timers.pop()
745            } else {
746                None
747            }
748        } else {
749            None
750        };
751
752        self.timers.retain(|t| t.alarm_id != timer_id.alarm_id || t.cid != timer_id.cid);
753        self.deadline_by_id.remove(timer_id);
754        ret
755    }
756
757    /// Returns the number of currently pending timers.
758    fn timer_count(&self) -> usize {
759        let count1 = self.timers.len();
760        let count2 = self.deadline_by_id.len();
761        assert!(count1 == count2, "broken invariant: count1: {}, count2: {}", count1, count2);
762        count1
763    }
764}
765
766// Clones a handle. Needed for 1:N notifications.
767pub(crate) fn clone_handle<H: HandleBased>(handle: &H) -> H {
768    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
769}
770
771async fn wait_signaled<H: HandleBased>(handle: &H) {
772    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
773}
774
775pub(crate) fn signal<H: HandleBased>(handle: &H) {
776    handle.signal_handle(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
777}
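// An illustrative handshake (hedged sketch) built from the helpers above; this
// mirrors how `Cmd::StopById` acknowledges completion: the requester keeps one
// end of a duplicated event and waits, while the timer loop signals its
// duplicate when the stop has been processed.
//
//   let done = zx::Event::create();
//   cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await?;
//   wait_signaled(&done).await; // Resumes once the loop calls signal(&done).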
778
779/// A [TimerDuration] represents a duration of time that can be expressed by
780/// a discrete timer register.
781///
782/// This is a low-level representation of time duration, used in interaction with
783/// hardware devices. It is therefore necessarily discretized, with adaptive
784/// resolution, depending on the physical characteristics of the underlying
785/// hardware timer that it models.
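///
/// Illustrative arithmetic (a sketch): the same span of time expressed at two
/// different resolutions compares equal, and division yields the number of
/// ticks needed at a given resolution.
///
/// ```ignore
/// let fine = TimerDuration::new(zx::BootDuration::from_nanos(1_000), 10_000); // 10ms
/// let coarse = TimerDuration::new(zx::BootDuration::from_millis(1), 10); // also 10ms
/// assert_eq!(fine, coarse);
///
/// let unit = TimerDuration::new(zx::BootDuration::from_millis(1), 1);
/// assert_eq!(TimerDuration::from(zx::BootDuration::from_millis(10)) / unit, 10);
/// ```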
786#[derive(Debug, Clone, Copy)]
787struct TimerDuration {
788    // The resolution of each one of the `ticks` below.
789    resolution: zx::BootDuration,
790    // The number of ticks that encodes time duration. Each "tick" represents
791    // one unit of `resolution` above.
792    ticks: u64,
793}
794
795/// This and the comparison traits below are used to allow TimerDuration
796/// calculations in a compact form.
797impl Eq for TimerDuration {}
798
799impl std::cmp::PartialOrd for TimerDuration {
800    fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
801        Some(self.cmp(other))
802    }
803}
804
805impl std::cmp::PartialEq for TimerDuration {
806    fn eq(&self, other: &Self) -> bool {
807        self.cmp(other) == std::cmp::Ordering::Equal
808    }
809}
810
811impl std::cmp::Ord for TimerDuration {
812    /// Two [TimerDuration]s compare equal if they model exactly the same duration of time,
813    /// no matter the resolutions.
814    fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
815        let self_ticks_128: i128 = self.ticks as i128;
816        let self_resolution: i128 = self.resolution_as_nanos() as i128;
817        let self_nanos = self_resolution * self_ticks_128;
818
819        let other_ticks_128: i128 = other.ticks as i128;
820        let other_resolution: i128 = other.resolution_as_nanos() as i128;
821        let other_nanos = other_resolution * other_ticks_128;
822
823        self_nanos.cmp(&other_nanos)
824    }
825}
826
827impl std::fmt::Display for TimerDuration {
828    /// Human readable TimerDuration exposes both the tick count and the resolution,
829    /// in the format of "ticks x resolution", with an end result of
830    /// `10x5ms` for example.
831    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
832        let ticks = self.ticks;
833        let resolution = self.resolution();
834        // Example: 10x1ms
835        write!(f, "{}x{}", ticks, format_duration(resolution),)
836    }
837}
838
839impl TimerDuration {
840    /// The maximum representable TimerDuration that we allow.
841    fn max() -> Self {
842        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
843    }
844
845    /// The zero [TimerDuration].
846    fn zero() -> Self {
847        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
848    }
849
850    /// Creates a new timer duration with the given parameters.
851    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
852        Self { resolution, ticks }
853    }
854
855    /// Creates a new timer duration using the resolution from `res_source` and
856    /// a specified number of ticks.
857    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
858        Self::new(res_source.resolution, ticks)
859    }
860
861    /// Returns the time duration represented by this TimerDuration.
862    ///
863    /// Due to the way duration is expressed, the same time duration
864    /// can be represented in multiple ways.
865    fn duration(&self) -> zx::BootDuration {
866        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
867        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
868        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
869    }
870
871    /// The resolution of this TimerDuration
872    fn resolution(&self) -> zx::BootDuration {
873        self.resolution
874    }
875
876    fn resolution_as_nanos(&self) -> u64 {
877        self.resolution().into_nanos().try_into().expect("resolution is never negative")
878    }
879
880    /// The number of ticks of this [TimerDuration].
881    fn ticks(&self) -> u64 {
882        self.ticks
883    }
884}
885
886impl From<zx::BootDuration> for TimerDuration {
887    fn from(d: zx::BootDuration) -> TimerDuration {
888        let nanos = d.into_nanos();
889        assert!(nanos >= 0);
890        let nanos_u64 = nanos.try_into().expect("guarded by assert");
891        TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
892    }
893}
894
895impl std::ops::Div for TimerDuration {
896    type Output = u64;
897    fn div(self, rhs: Self) -> Self::Output {
898        let self_nanos = self.resolution_as_nanos() * self.ticks;
899        let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
900        self_nanos / rhs_nanos
901    }
902}
903
904impl std::ops::Mul<u64> for TimerDuration {
905    type Output = Self;
906    fn mul(self, rhs: u64) -> Self::Output {
907        Self::new(self.resolution, self.ticks * rhs)
908    }
909}
910
911/// Contains the configuration of a specific timer.
912#[derive(Debug)]
913pub(crate) struct TimerConfig {
914    /// The resolutions supported by this timer. Each entry is one possible
915    /// duration for one timer "tick".  The resolution is picked when a timer
916    /// request is sent.
917    resolutions: Vec<zx::BootDuration>,
918    /// The maximum count of "ticks" that the timer supports. The timer usually
919    /// has a register that counts up or down based on a clock signal with
920    /// the period specified by `resolutions`.  This is the maximum value that
921    /// the counter can count to without overflowing.
922    max_ticks: u64,
923    /// The stable ID of the timer with the above configuration.
924    id: u64,
925}
926
927impl TimerConfig {
928    /// Creates a new timer config with supported timer resolutions and the max
929    /// ticks value for the timer's counter.
930    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
931        debug!(
932            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
933            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
934            max_ticks,
935            timer_id
936        );
937        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
938        TimerConfig { resolutions, max_ticks, id: timer_id }
939    }
940
941    fn new_empty() -> Self {
942        error!("TimerConfig::new_empty() called, this is not OK.");
943        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
944    }
945
946    // Picks the most appropriate timer setting so that the timer fires as close
947    // as possible to when `duration` expires.
948    //
949    // If duration is too far in the future for what the timer supports,
950    // return a smaller value, to allow the timer to be reprogrammed multiple
951    // times.
952    //
953    // If the available menu of resolutions is such that we can wake only after
954    // the intended deadline, begrudgingly return that option.
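    //
    // A worked example (hedged sketch): with a single supported resolution of 1ms
    // and a large max_ticks, an exact multiple of the resolution is returned
    // as-is, while an inexact duration is rounded toward the neighboring tick
    // count with the smaller slack:
    //
    //   let config = TimerConfig::new_from_data(
    //       /*timer_id=*/ 6, &[zx::BootDuration::from_millis(1)], /*max_ticks=*/ 1_000);
    //   // 5ms divides evenly: 5 ticks of 1ms, zero slack.
    //   assert_eq!(config.pick_setting(zx::BootDuration::from_millis(5)).ticks(), 5);
    //   // 2.3ms does not: 2 ticks (0.3ms early) beats 3 ticks (0.7ms late).
    //   assert_eq!(config.pick_setting(zx::BootDuration::from_micros(2_300)).ticks(), 2);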
955    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
956        //  0         |-------------->|<---------------|
957        //  |---------+---------------+----------------+---->
958        //  |---------^               |                |
959        //  | best positive slack     |                |
960        //  |-------------------------^ duration       |
961        //  |------------------------------------------^ best negative slack.
962        let mut best_positive_slack = TimerDuration::zero();
963        let mut best_negative_slack = TimerDuration::max();
964
965        if self.max_ticks == 0 {
966            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
967        }
968        let duration_slack: TimerDuration = duration.into();
969
970        for res1 in self.resolutions.iter() {
971            let smallest_unit = TimerDuration::new(*res1, 1);
972            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
973
974            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
975            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
976
977            if smallest_slack_larger_than_duration {
978                if duration_slack == TimerDuration::zero() {
979                    best_negative_slack = TimerDuration::zero();
980                } else if smallest_unit < best_negative_slack {
981                    best_negative_slack = smallest_unit;
982                }
983            }
984            if largest_slack_smaller_than_duration {
985                if max_tick_at_res > best_positive_slack
986                    || best_positive_slack == TimerDuration::zero()
987                {
988                    best_positive_slack = max_tick_at_res;
989                }
990            }
991
992            // "Regular" case.
993            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
994                // Check whether duration divides evenly into the available slack options
995                // for this resolution.  If it does, then that is the slack we're looking for.
996                let q = duration_slack / smallest_unit;
997                let d = smallest_unit * q;
998                if d == duration_slack {
999                    // Exact match, we can return right now.
1000                    return d;
1001                } else {
1002                    // Not an exact match, so q ticks is before, but q+1 is after.
1003                    if d > best_positive_slack {
1004                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
1005                    }
1006                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
1007                    if d_plus < best_negative_slack {
1008                        best_negative_slack = d_plus;
1009                    }
1010                }
1011            }
1012        }
1013
1014        let p_slack = duration - best_positive_slack.duration();
1015        let n_slack = best_negative_slack.duration() - duration;
1016
1017        // If the closest approximation is 0ns, then we can not advance time, so we reject it.
1018        // Otherwise pick the smallest slack.  Note that when we pick the best positive slack,
1019        // we will wake *before* the actual deadline.  In multi-resolution counters, this enables
1020        // us to pick a finer count in the next go.
1021        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
1022            best_positive_slack
1023        } else {
1024            best_negative_slack
1025        };
1026        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
1027        assert!(
1028            ret.duration().into_nanos() >= 0,
1029            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
1030            ret,
1031            format_duration(p_slack),
1032            format_duration(n_slack),
1033            format_duration(duration),
1034            best_positive_slack,
1035            best_negative_slack,
1036            duration_slack,
1037            p_slack != zx::BootDuration::ZERO,
1038            p_slack,
1039            zx::BootDuration::ZERO,
1040        );
1041        ret
1042    }
1043}
1044
1045async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
1046    debug!("get_timer_properties: requesting timer properties.");
1047    hrtimer.get_timer_properties().await
1048}
1049
1050/// The state of a single hardware timer that we must bookkeep.
1051struct TimerState {
1052    // The task waiting for the proximate timer to expire.
1053    task: fasync::Task<()>,
1054    // The deadline that the above task is waiting for.
1055    deadline: fasync::BootInstant,
1056}
1057
1058/// The command loop for timer interaction.  All changes to the wake alarm device programming
1059/// come in the form of commands through `cmds`.
1060///
1061/// Args:
1062/// - `snd`: the send end of `cmd` below, a clone is given to each spawned sub-task.
1063/// - `cmds`: the input queue of alarm-related commands.
1064/// - `timer_proxy`: the FIDL API proxy for interacting with the hardware device.
1065/// - `inspect`: the inspect node to record loop info into.
1066async fn wake_timer_loop(
1067    snd: mpsc::Sender<Cmd>,
1068    mut cmds: mpsc::Receiver<Cmd>,
1069    timer_proxy: Box<dyn TimerOps>,
1070    inspect: finspect::Node,
1071) {
1072    debug!("wake_timer_loop: started");
1073
1074    let mut timers = Timers::new();
1075    let timer_config = get_timer_properties(&timer_proxy).await;
1076
1077    // Keeps the currently executing HrTimer closure.  This is not read from, but
1078    // keeps the timer task active.
1079    #[allow(clippy::collection_is_never_read)]
1080    let mut hrtimer_status: Option<TimerState> = None;
1081
1082    // Initialize inspect properties. This must be done only once.
1083    //
1084    // Take note that these properties are updated when the `cmds` loop runs.
1085    // This means that repeated reads while no `cmds` activity occurs will return
1086    // old readings.  This is to ensure a consistent ability to replay the last
1087    // loop run if needed.
1088    let now_prop = inspect.create_int("now_ns", 0);
1089    let now_formatted_prop = inspect.create_string("now_formatted", "");
1090    let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1091    let pending_timers_prop = inspect.create_string("pending_timers", "");
1092    let deadline_histogram_prop = inspect.create_int_exponential_histogram(
1093        "requested_deadlines_ns",
1094        finspect::ExponentialHistogramParams {
1095            floor: 0,
1096            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1097            // Allows capturing deadlines up to dozens of days.
1098            step_multiplier: 10,
1099            buckets: 16,
1100        },
1101    );
1102    let slack_histogram_prop = inspect.create_int_exponential_histogram(
1103        "slack_ns",
1104        finspect::ExponentialHistogramParams {
1105            floor: 0,
1106            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1107            step_multiplier: 10,
1108            buckets: 16,
1109        },
1110    );
1111    let schedule_delay_prop = inspect.create_int_exponential_histogram(
1112        "schedule_delay_ns",
1113        finspect::ExponentialHistogramParams {
1114            floor: 0,
1115            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1116            step_multiplier: 10,
1117            buckets: 16,
1118        },
1119    );
1120    // Internals of what was programmed into the wake alarms hardware.
1121    let hw_node = inspect.create_child("hardware");
1122    let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1123    let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1124
1125    while let Some(cmd) = cmds.next().await {
1126        trace::duration!(c"alarms", c"Cmd");
1127        // Use a consistent notion of "now" across commands.
1128        let now = fasync::BootInstant::now();
1129        now_prop.set(now.into_nanos());
1130        trace::instant!(c"alarms", c"wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1131        match cmd {
1132            Cmd::Start { cid, deadline, mode, alarm_id, responder } => {
1133                trace::duration!(c"alarms", c"Cmd::Start");
1134                let responder = responder.borrow_mut().take().expect("responder is always present");
1135                // NOTE: hold keep_alive until all work is done.
1136                debug!(
1137                    "wake_timer_loop: START alarm_id: \"{}\", cid: {:?}\n\tdeadline: {}\n\tnow:      {}",
1138                    alarm_id,
1139                    cid,
1140                    format_timer(deadline.into()),
1141                    format_timer(now.into()),
1142                );
1143
1144                // This is the only option that requires further action.
1145                if let fta::SetAndWaitMode::NotifySetupDone(setup_done) = mode {
1146                    defer! {
1147                        // Must signal once the setup is completed.
1148                        signal(&setup_done);
1149                        debug!("wake_timer_loop: START: setup_done signaled");
1150                    };
1151                }
1152                deadline_histogram_prop.insert((deadline - now).into_nanos());
1153                if Timers::expired(now, deadline) {
1154                    trace::duration!(c"alarms", c"Cmd::Start:immediate");
1155                    // A timer set for now or a past instant expires right away.
1156                    let (_lease, keep_alive) = zx::EventPair::create();
1157                    debug!(
1158                        "[{}] wake_timer_loop: bogus lease {:?}",
1159                        line!(),
1160                        &keep_alive.get_koid().unwrap()
1161                    );
1162                    responder
1163                        .send(Ok(keep_alive))
1164                        .map(|_| {
1165                            debug!(
1166                                concat!(
1167                                    "wake_timer_loop: cid: {:?}, alarm: {}: EXPIRED IMMEDIATELY\n\t",
1168                                    "deadline({}) <= now({})"
1169                                ),
1170                                cid,
1171                                alarm_id,
1172                                format_timer(deadline.into()),
1173                                format_timer(now.into())
1174                            )
1175                        })
1176                        .map_err(|e| {
1177                            error!(
1178                            "wake_timer_loop: cid: {:?}, alarm: {}: could not notify, dropping: {}",
1179                                cid, alarm_id, e)
1180                        })
1181                        .unwrap_or(());
1182                } else {
1183                    trace::duration!(c"alarms", c"Cmd::Start:regular");
1184                    // A timer scheduled for the future gets inserted into the timer heap.
1185                    let was_empty = timers.is_empty();
1186
1187                    let deadline_before = timers.peek_deadline();
1188                    timers.push(TimerNode::new(deadline, alarm_id, cid, responder));
1189                    let deadline_after = timers.peek_deadline();
1190
1191                    let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1192                    let needs_cancel = !was_empty && deadline_changed;
1193                    let needs_reschedule = was_empty || deadline_changed;
1194
1195                    if needs_reschedule {
1196                        // Always schedule the proximate deadline.
1197                        let schedulable_deadline = deadline_after.unwrap_or(deadline);
1198                        if needs_cancel {
1199                            stop_hrtimer(&timer_proxy, &timer_config).await;
1200                        }
1201                        hrtimer_status = Some(
1202                            schedule_hrtimer(
1203                                now,
1204                                &timer_proxy,
1205                                schedulable_deadline,
1206                                snd.clone(),
1207                                &timer_config,
1208                                &schedule_delay_prop,
1209                            )
1210                            .await,
1211                        );
1212                    }
1213                }
1214            }
1215            Cmd::StopById { timer_id, done } => {
1216                trace::duration!(c"alarms", c"Cmd::StopById", "alarm_id" => &timer_id.alarm_id[..]);
1217                debug!("wake_timer_loop: STOP timer: {}", timer_id);
1218                let deadline_before = timers.peek_deadline();
1219
1220                if let Some(mut timer_node) = timers.remove_by_id(&timer_id) {
1221                    let deadline_after = timers.peek_deadline();
1222
1223                    if let Some(responder) = timer_node.take_responder() {
1224                        // We must reply to the responder to keep the connection open.
1225                        responder.send(Err(fta::WakeAlarmsError::Dropped)).expect("infallible");
1226                    }
1227                    if is_deadline_changed(deadline_before, deadline_after) {
1228                        stop_hrtimer(&timer_proxy, &timer_config).await;
1229                    }
1230                    if let Some(deadline) = deadline_after {
1231                        // Reschedule the hardware timer if the removed timer is the earliest one,
1232                        // and another one exists.
1233                        let new_timer_state = schedule_hrtimer(
1234                            now,
1235                            &timer_proxy,
1236                            deadline,
1237                            snd.clone(),
1238                            &timer_config,
1239                            &schedule_delay_prop,
1240                        )
1241                        .await;
1242                        let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1243                        if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1244                            // Allow the task to complete, I suppose.
1245                            task.await;
1246                        }
1247                    } else {
1248                        // No next timer, clean up the hrtimer status.
1249                        hrtimer_status = None;
1250                    }
1251                } else {
1252                    debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1253                }
1254                signal(&done);
1255            }
1256            Cmd::Alarm { expired_deadline, keep_alive } => {
1257                trace::duration!(c"alarms", c"Cmd::Alarm");
1258                // Expire all eligible timers, based on "now".  This is because
1259                // we may have woken up earlier than the actual deadline. This
1260                // happens for example if the timer can not make the actual
1261                // deadline and needs to be re-programmed.
1262                debug!(
1263                    "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1264                    format_timer(expired_deadline.into()),
1265                    keep_alive.get_koid().unwrap(),
1266                );
1267                let expired_count =
1268                    notify_all(&mut timers, &keep_alive, now, &slack_histogram_prop)
1269                        .expect("notification succeeds");
1270                if expired_count == 0 {
1271                    // This could be a resolution switch, or a straggler notification.
1272                    // Either way, the hardware timer is still ticking, cancel it.
1273                    debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1274                    stop_hrtimer(&timer_proxy, &timer_config).await;
1275                }
1276                // There is a timer to reschedule, do that now.
1277                hrtimer_status = match timers.peek_deadline() {
1278                    None => None,
1279                    Some(deadline) => Some(
1280                        schedule_hrtimer(
1281                            now,
1282                            &timer_proxy,
1283                            deadline,
1284                            snd.clone(),
1285                            &timer_config,
1286                            &schedule_delay_prop,
1287                        )
1288                        .await,
1289                    ),
1290                }
1291            }
1292            Cmd::AlarmFidlError { expired_deadline, error } => {
1293                trace::duration!(c"alarms", c"Cmd::AlarmFidlError");
1294                // We do not have a wake lease, so the system may sleep before
1295                // we get to schedule a new timer. We have no way to avoid it
1296                // today.
1297                warn!(
1298                    "wake_timer_loop: FIDL error: {:?}, deadline: {}, now: {}",
1299                    error,
1300                    format_timer(expired_deadline.into()),
1301                    format_timer(now.into()),
1302                );
1303                // Manufacture a fake lease to make the code below work.
1304                // Maybe use Option instead?
1305                let (_dummy_lease, peer) = zx::EventPair::create();
1306                debug!("XXX: [{}] bogus lease: 1 {:?}", line!(), &peer.get_koid().unwrap());
1307                notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1308                    .expect("notification succeeds");
1309                hrtimer_status = match timers.peek_deadline() {
1310                    None => None, // No remaining timers, nothing to schedule.
1311                    Some(deadline) => Some(
1312                        schedule_hrtimer(
1313                            now,
1314                            &timer_proxy,
1315                            deadline,
1316                            snd.clone(),
1317                            &timer_config,
1318                            &schedule_delay_prop,
1319                        )
1320                        .await,
1321                    ),
1322                }
1323            }
1324            Cmd::AlarmDriverError { expired_deadline, error } => {
1325                trace::duration!(c"alarms", c"Cmd::AlarmDriverError");
1326                let (_dummy_lease, peer) = zx::EventPair::create();
1327                debug!("XXX: [{}] bogus lease: {:?}", line!(), &peer.get_koid().unwrap());
1328                notify_all(&mut timers, &peer, now, &slack_histogram_prop)
1329                    .expect("notification succeeds");
1330                match error {
1331                    fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1332                        // Nothing to do here; cancellation is handled in the Stop code path.
1333                        debug!(
1334                            "wake_timer_loop: CANCELED timer at deadline: {}",
1335                            format_timer(expired_deadline.into())
1336                        );
1337                    }
1338                    _ => {
1339                        error!(
1340                            "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}",
1341                            error,
1342                            format_timer(expired_deadline.into()),
1343                            format_timer(now.into()),
1344                        );
1345                        // We do not have a wake lease, so the system may sleep before
1346                        // we get to schedule a new timer. We have no way to avoid it
1347                        // today.
1348                        hrtimer_status = match timers.peek_deadline() {
1349                            None => None,
1350                            Some(deadline) => Some(
1351                                schedule_hrtimer(
1352                                    now,
1353                                    &timer_proxy,
1354                                    deadline,
1355                                    snd.clone(),
1356                                    &timer_config,
1357                                    &schedule_delay_prop,
1358                                )
1359                                .await,
1360                            ),
1361                        }
1362                    }
1363                }
1364            }
1365        }
1366
1367        {
1368            // Print and record diagnostics after each iteration, record the
1369            // duration for performance awareness.  Note that iterations happen
1370            // only occasionally, so these stats can remain unchanged for a long
1371            // time.
1372            trace::duration!(c"timekeeper", c"inspect");
1373            let now_formatted = format_timer(now.into());
1374            debug!("wake_timer_loop: now:                             {}", &now_formatted);
1375            now_formatted_prop.set(&now_formatted);
1376
1377            let pending_timers_count: u64 =
1378                timers.timer_count().try_into().expect("always convertible");
1379            debug!("wake_timer_loop: currently pending timer count:   {}", pending_timers_count);
1380            pending_timers_count_prop.set(pending_timers_count);
1381
1382            let pending_timers = format!("{}", timers);
1383            debug!("wake_timer_loop: currently pending timers:        {}", &timers);
1384            pending_timers_prop.set(&pending_timers);
1385
1386            let current_deadline: String = hrtimer_status
1387                .as_ref()
1388                .map(|s| format!("{}", format_timer(s.deadline.into())))
1389                .unwrap_or_else(|| "(none)".into());
1390            debug!("wake_timer_loop: current hardware timer deadline: {}", current_deadline);
1391            current_hw_deadline_prop.set(&current_deadline);
1392
1393            let remaining_duration_until_alarm = hrtimer_status
1394                .as_ref()
1395                .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1396                .unwrap_or_else(|| "(none)".into());
1397            debug!(
1398                "wake_timer_loop: remaining duration until alarm:  {}",
1399                &remaining_duration_until_alarm
1400            );
1401            remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1402            debug!("---");
1403        }
1404    }
1405
1406    debug!("wake_timer_loop: exiting; this is not expected in production code.");
1407}
1408
1409/// Schedules a wake alarm.
1410///
1411/// Args:
1412/// - `now`: the instant to treat as the current time.
1413/// - `hrtimer`: the proxy for the hrtimer device driver.
1414/// - `deadline`: the instant in the future at which the alarm should fire.
1415/// - `command_send`: the sender channel used to report the outcome once the timer fires or fails.
1416/// - `timer_config`: the hardware timer configuration, listing supported resolutions and the
1417///   maximum tick value.
1418/// - `schedule_delay_histogram`: records how long it took for the hrtimer to be scheduled.
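///
/// A minimal call sketch, mirroring how the wake loop reschedules after an alarm fires
/// (the surrounding state such as `timers`, `timer_proxy`, `snd`, and the inspect
/// properties is assumed to exist in the caller):
///
/// ```ignore
/// let hrtimer_status = match timers.peek_deadline() {
///     None => None,
///     Some(deadline) => Some(
///         schedule_hrtimer(now, &timer_proxy, deadline, snd.clone(), &timer_config, &schedule_delay_prop)
///             .await,
///     ),
/// };
/// ```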
1419async fn schedule_hrtimer(
1420    now: fasync::BootInstant,
1421    hrtimer: &Box<dyn TimerOps>,
1422    deadline: fasync::BootInstant,
1423    mut command_send: mpsc::Sender<Cmd>,
1424    timer_config: &TimerConfig,
1425    schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
1426) -> TimerState {
1427    let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
1428    trace::duration!(c"alarms", c"schedule_hrtimer", "timeout" => timeout.into_nanos());
1429    // When signaled, the hrtimer has been scheduled.
1430    let hrtimer_scheduled = zx::Event::create();
1431
1432    debug!(
1433        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
1434        format_timer(now.into()),
1435        format_timer(deadline.into()),
1436        format_duration(timeout),
1437    );
1438
1439    let slack = timer_config.pick_setting(timeout);
1440
1441    let resolution_nanos = slack.resolution.into_nanos();
1442    let ticks = slack.ticks();
1443    trace::instant!(c"alarms", c"hrtimer:programmed",
1444        trace::Scope::Process,
1445        "resolution_ns" => resolution_nanos,
1446        "ticks" => ticks
1447    );
1448    let start_and_wait_fut = hrtimer.start_and_wait(
1449        timer_config.id,
1450        &ffhh::Resolution::Duration(resolution_nanos),
1451        ticks,
1452        clone_handle(&hrtimer_scheduled),
1453    );
1454    let hrtimer_task = fasync::Task::local(async move {
1455        debug!("hrtimer_task: waiting for hrtimer driver response");
1456        trace::instant!(c"alarms", c"hrtimer:started", trace::Scope::Process);
1457        let response = start_and_wait_fut.await;
1458        trace::instant!(c"alarms", c"hrtimer:response", trace::Scope::Process);
1459        match response {
1460            Err(TimerOpsError::Fidl(e)) => {
1461                trace::instant!(c"alarms", c"hrtimer:response:fidl_error", trace::Scope::Process);
1462                debug!("hrtimer_task: hrtimer FIDL error: {:?}", e);
1463                command_send
1464                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
1465                    .unwrap();
1466                // BAD: no wake lease is available here, so we cannot keep the system awake.
1467            }
1468            Err(TimerOpsError::Driver(e)) => {
1469                let driver_error_str = format!("{:?}", e);
1470                trace::instant!(c"alarms", c"hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
1471                debug!("hrtimer_task: hrtimer driver error: {:?}", e);
1472                command_send
1473                    .start_send(Cmd::AlarmDriverError { expired_deadline: now, error: e })
1474                    .unwrap();
1475                // BAD: no wake lease is available here, so we cannot keep the system awake.
1476            }
1477            Ok(keep_alive) => {
1478                trace::instant!(c"alarms", c"hrtimer:response:alarm", trace::Scope::Process);
1479                debug!("hrtimer: got alarm response: {:?}", keep_alive);
1480                // May trigger sooner than the deadline.
1481                command_send
1482                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
1483                    .unwrap();
1484            }
1485        }
1486        debug!("hrtimer_task: exiting task.");
1487        trace::instant!(c"alarms", c"hrtimer:task_exit", trace::Scope::Process);
1488    });
1489    debug!("schedule_hrtimer: waiting for event to be signaled");
1490
1491    // We must wait here to ensure that the wake alarm has been scheduled.
1492    wait_signaled(&hrtimer_scheduled).await;
1493    let now_after_signaled = fasync::BootInstant::now();
1494    let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
1495    if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
1496        trace::duration!(c"alarms", c"schedule_hrtimer:unusual_duration",
1497            "duration" => duration_until_scheduled.into_nanos());
1498        warn!(
1499            "unusual duration until hrtimer scheduled: {}",
1500            format_duration(duration_until_scheduled)
1501        );
1502    }
1503    schedule_delay_histogram.insert(duration_until_scheduled.into_nanos());
1504    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
1505    TimerState { task: hrtimer_task, deadline }
1506}
1507
1508/// Notify all `timers` that `reference_instant` has been reached.
1509///
1510/// The notified `timers` are removed from the list of timers to notify.
1511///
1512/// Args:
1513/// - `timers`: the collection of currently pending timers.
1514/// - `lease_prototype`: an EventPair cloned and handed to each notified alarm as a wake lease.
1515/// - `reference_instant`: the reference instant for notification; all timers with deadlines
1516///   at or before this instant are notified and removed from `timers`.
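/// - `unusual_slack_histogram`: records, in nanoseconds, how late any alarms that missed
///   their deadlines fired.
///
/// Returns the number of alarms that were notified.
///
/// A minimal call sketch, as used by the wake loop after an alarm fires (the surrounding
/// `timers`, `keep_alive`, `now`, and inspect histogram are assumed to exist in the caller):
///
/// ```ignore
/// let expired_count = notify_all(&mut timers, &keep_alive, now, &slack_histogram_prop)
///     .expect("notification succeeds");
/// ```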
1517fn notify_all(
1518    timers: &mut Timers,
1519    lease_prototype: &zx::EventPair,
1520    reference_instant: fasync::BootInstant,
1521    unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1522) -> Result<usize> {
1523    trace::duration!(c"alarms", c"notify_all");
1524    let now = fasync::BootInstant::now();
1525    let mut expired = 0;
1526    while let Some(mut timer_node) = timers.maybe_expire_earliest(reference_instant) {
1527        expired += 1;
1528        // How much later than requested did the notification happen.
1529        let deadline = *timer_node.get_deadline();
1530        let alarm_id = timer_node.get_alarm_id().to_string();
1531        let cid = timer_node.get_cid().clone();
1532        let slack: zx::BootDuration = deadline - now;
1533        if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1534            trace::duration!(c"alarms", c"notify_all:unusual_slack", "slack" => slack.into_nanos());
1535            // This alarm triggered noticeably later than it should have.
1536            warn!(
1537                "alarm id: {} had an unusually large slack: {}",
1538                alarm_id,
1539                format_duration(slack)
1540            );
1541        }
1542        if slack < zx::BootDuration::ZERO {
1543            unusual_slack_histogram.insert(-slack.into_nanos());
1544        }
1545        debug!(
1546            concat!(
1547                "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tcid: {:?},\n\t",
1548                "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1549            ),
1550            alarm_id,
1551            format_timer(deadline.into()),
1552            cid,
1553            format_timer(reference_instant.into()),
1554            format_timer(now.into()),
1555            format_duration(slack),
1556        );
1557        let lease = clone_handle(lease_prototype);
1558        trace::instant!(c"alarms", c"notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "cid" => cid);
1559        let _ = timer_node
1560            .take_responder()
1561            .map(|r| r.send(Ok(lease)))
1562            .map_or_else(|| Ok(()), |res| res)
1563            .map_err(|e| error!("could not signal responder: {:?}", e));
1564        trace::instant!(c"alarms", c"notified", trace::Scope::Process);
1565    }
1566    trace::instant!(c"alarms", c"notify", trace::Scope::Process, "expired_count" => expired);
1567    debug!("notify_all: expired count: {}", expired);
1568    Ok(expired)
1569    // Note: a new hardware timer has not been scheduled at this point; the caller does that.
1570}
1571
1572/// The hrtimer driver service directory.  hrtimer driver APIs appear as randomly
1573/// named files in this directory. They are expected to come and go.
1574const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1575
1576/// Connects to the high resolution timer device driver.
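///
/// A minimal usage sketch (error handling elided):
///
/// ```ignore
/// let hrtimer_proxy = connect_to_hrtimer_async().await?;
/// ```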
1577pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1578    debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1579    let dir =
1580        fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1581            .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1582    let path = device_watcher::watch_for_files(&dir)
1583        .await
1584        .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1585        .try_next()
1586        .await
1587        .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1588    let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1589    let path = path
1590        .to_str()
1591        .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1592    connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1593        .context("Failed to connect to built-in service")
1594}
1595
1596#[cfg(test)]
1597mod tests {
1598    use super::*;
1599    use diagnostics_assertions::{assert_data_tree, AnyProperty};
1600    use futures::{select, Future};
1601    use std::task::Poll;
1602    use test_case::test_case;
1603    use test_util::{assert_gt, assert_lt};
1604
1605    // A test fixture function that sets up the fake wake alarms machinery.
1606    //
1607    // The user supplies a factory function with the async code to run.
1608    //
1609    // Args:
1610    //  - `run_for_duration`: the amount of fake time that the test should run for.
1611    //  - `test_fn_factory`: a regular function which takes a `WakeAlarmsProxy` and an
1612    //    `Inspector`, and returns the async closure that the test should run.
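    //
    // A minimal usage sketch (the duration and the test body are illustrative):
    //
    //   run_in_fake_time_and_test_context(
    //       zx::MonotonicDuration::from_nanos(110),
    //       |wake_proxy, _inspector| async move {
    //           // Drive `wake_proxy` here; the body runs in fake time.
    //       },
    //   );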
1613    fn run_in_fake_time_and_test_context<F, U, T>(
1614        run_for_duration: zx::MonotonicDuration,
1615        test_fn_factory: F,
1616    ) where
1617        F: FnOnce(fta::WakeAlarmsProxy, finspect::Inspector) -> U, // F returns an async closure.
1618        U: Future<Output = T> + 'static, // the async closure may return an arbitrary type T.
1619        T: 'static,
1620    {
1621        let mut exec = fasync::TestExecutor::new_with_fake_time(); // We will be running this test case in fake time.
1622        exec.set_fake_time(fasync::MonotonicInstant::from_nanos(0));
1623        let (mut fake_commands_in, fake_commands_out) = mpsc::channel::<FakeCmd>(0);
1624        let (hrtimer_proxy, hrtimer_task) = fake_hrtimer_connection(fake_commands_out);
1625        let inspector = finspect::component::inspector();
1626        let alarms = Rc::new(Loop::new(hrtimer_proxy, inspector.root().create_child("test")));
1627
1628        let (_handle, peer) = zx::EventPair::create();
1629
1630        let done_set_properties = zx::Event::create();
1631        let begin_test = clone_handle(&done_set_properties);
1632        let begin_serve = clone_handle(&done_set_properties);
1633
1634        let mut fake_commands_in_clone = fake_commands_in.clone();
1635        let config_task = async move {
1636            fake_commands_in
1637                .start_send(FakeCmd::SetProperties {
1638                    resolutions: vec![zx::BootDuration::from_nanos(43)],
1639                    max_ticks: 100,
1640                    keep_alive: peer,
1641                    done: clone_handle(&done_set_properties),
1642                })
1643                .unwrap();
1644        };
1645
1646        let (wake_proxy, wake_stream) =
1647            fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
1648
1649        let serving_task = async move {
1650            fasync::OnSignals::new(begin_serve, zx::Signals::EVENT_SIGNALED).await.unwrap();
1651            serve(alarms, wake_stream).await;
1652        };
1653
1654        let seq_fn_fut = test_fn_factory(wake_proxy, inspector.clone());
1655
1656        let test_task = async move {
1657            // Wait until configuration has completed.
1658            fasync::OnSignals::new(begin_test, zx::Signals::EVENT_SIGNALED).await.unwrap();
1659
1660            let result = seq_fn_fut.await;
1661
1662            // Request orderly shutdown.
1663            fake_commands_in_clone.start_send(FakeCmd::Exit).unwrap();
1664            result
1665        };
1666
1667        let mut main_fut = fasync::Task::local(async {
1668            let _r = futures::join!(hrtimer_task, config_task, serving_task, test_task);
1669        });
1670        run_in_fake_time(&mut exec, &mut main_fut, run_for_duration);
1671    }
1672
1673    // A loop that moves fake time forward in small increments, waking timers along the way.
1674    //
1675    // In almost all tests, we set up the environment for the test to run in, under a
1676    // test executor running in fake time. We then submit the resulting future
1677    // to this function for execution.
1678    //
1679    // This has been taken from //src/ui/lib/input_pipeline/src/autorepeater.rs
1680    // with some adaptation.
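    //
    // A minimal usage sketch (the executor, future, and duration are illustrative; compare
    // with `run_in_fake_time_and_test_context` above):
    //
    //   let mut exec = fasync::TestExecutor::new_with_fake_time();
    //   let mut main_fut = fasync::Task::local(async { /* test body */ });
    //   run_in_fake_time(&mut exec, &mut main_fut, zx::MonotonicDuration::from_millis(1));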
1681    fn run_in_fake_time<F>(
1682        executor: &mut fasync::TestExecutor,
1683        main_fut: &mut F,
1684        total_duration: zx::MonotonicDuration,
1685    ) where
1686        F: Future<Output = ()> + Unpin,
1687    {
1688        const INCREMENT: zx::MonotonicDuration = zx::MonotonicDuration::from_nanos(13);
1689        let mut current = zx::MonotonicDuration::ZERO;
1690        let mut poll_status = Poll::Pending;
1691
1692        // We run until either the future completes or the timeout is reached,
1693        // whichever comes first.
1694        // Running the future after it returns Poll::Ready is not allowed, so
1695        // we must exit the loop then.
1696        while current < (total_duration + INCREMENT) && poll_status == Poll::Pending {
1697            let next = executor.now() + INCREMENT;
1698            executor.set_fake_time(next);
1699            executor.wake_expired_timers();
1700            poll_status = executor.run_until_stalled(main_fut);
1701            current = current + INCREMENT;
1702        }
1703        let now = executor.now();
1704        assert_eq!(
1705            poll_status,
1706            Poll::Ready(()),
1707            "the main future did not complete at {}, perhaps increase total_duration?",
1708            format_timer(now.into())
1709        );
1710    }
1711
1712    fn create_fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1713        let (_lease, peer) = zx::EventPair::create();
1714        peer
1715    }
1716
1717    #[test]
1718    fn timer_duration_no_overflow() {
1719        let duration1 = TimerDuration {
1720            resolution: zx::BootDuration::from_seconds(100_000_000),
1721            ticks: u64::MAX,
1722        };
1723        let duration2 = TimerDuration {
1724            resolution: zx::BootDuration::from_seconds(110_000_000),
1725            ticks: u64::MAX,
1726        };
1727        assert_eq!(duration1, duration1);
1728        assert_eq!(duration2, duration2);
1729
1730        assert_lt!(duration1, duration2);
1731        assert_gt!(duration2, duration1);
1732    }
1733
1734    #[test_case(
1735        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1736        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1737    )]
1738    #[test_case(
1739        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
1740        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1741    )]
1742    #[test_case(
1743        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1744        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
1745    )]
1746    #[test_case(
1747        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
1748        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
1749    )]
1750    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
1751        assert_eq!(one, other);
1752    }
1753
1754    #[test_case(
1755        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1756        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
1757    )]
1758    #[test_case(
1759        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
1760        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
1761    )]
1762    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
1763        assert_lt!(one, other);
1764    }
1765
1766    #[test_case(
1767        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
1768        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1769    )]
1770    #[test_case(
1771        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
1772        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
1773    )]
1774    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
1775        assert_gt!(one, other);
1776    }
1777
1778    #[test_case(
1779        vec![zx::BootDuration::from_nanos(1)],
1780        100,
1781        zx::BootDuration::from_nanos(0),
1782        TimerDuration::new(zx::BootDuration::from_nanos(1), 0) ; "Exact at 0x1ns"
1783    )]
1784    #[test_case(
1785        vec![zx::BootDuration::from_nanos(1)],
1786        100,
1787        zx::BootDuration::from_nanos(50),
1788        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
1789    )]
1790    #[test_case(
1791        vec![zx::BootDuration::from_nanos(2)],
1792        100,
1793        zx::BootDuration::from_nanos(50),
1794        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
1795    )]
1796    #[test_case(
1797        vec![zx::BootDuration::from_nanos(3)],
1798        100,
1799        zx::BootDuration::from_nanos(50),
1800        // The closest duration is 51ns.
1801        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
1802    )]
1803    #[test_case(
1804        vec![
1805            zx::BootDuration::from_nanos(3),
1806            zx::BootDuration::from_nanos(4)
1807        ],
1808        100,
1809        zx::BootDuration::from_nanos(50),
1810        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
1811    )]
1812    #[test_case(
1813        vec![
1814            zx::BootDuration::from_nanos(1000),
1815        ],
1816        100,
1817        zx::BootDuration::from_nanos(50),
1818        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
1819        "950ns negative slack is the best we can do"
1820    )]
1821    #[test_case(
1822        vec![
1823            zx::BootDuration::from_nanos(1),
1824        ],
1825        10,
1826        zx::BootDuration::from_nanos(50),
1827        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
1828        "10ns positive slack is the best we can do"
1829    )]
1830    #[test_case(
1831        vec![
1832            zx::BootDuration::from_millis(1),
1833            zx::BootDuration::from_micros(100),
1834            zx::BootDuration::from_micros(10),
1835            zx::BootDuration::from_micros(1),
1836        ],
1837        20,  // Make only one of the resolutions above match.
1838        zx::BootDuration::from_micros(150),
1839        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
1840        "Realistic case with resolutions from driver, should be 15us"
1841    )]
1842    #[test_case(
1843        vec![
1844            zx::BootDuration::from_millis(1),
1845            zx::BootDuration::from_micros(100),
1846            zx::BootDuration::from_micros(10),
1847            zx::BootDuration::from_micros(1),
1848        ],
1849        2000,  // Several resolutions can express this duration exactly.
1850        zx::BootDuration::from_micros(6000),
1851        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
1852        "Coarser exact unit wins"
1853    )]
1854    fn test_pick_setting(
1855        resolutions: Vec<zx::BootDuration>,
1856        max_ticks: u64,
1857        duration: zx::BootDuration,
1858        expected: TimerDuration,
1859    ) {
1860        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
1861        let actual = config.pick_setting(duration);
1862
1863        // .eq() does not work here, since we do not just require that the values
1864        // be equal, but also that the same resolution is used in both.
1865        assert_slack_eq(expected, actual);
1866    }
1867
1868    // TimerDuration assertion with human-friendly output in case of an error.
1869    fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
1870        let slack = expected.duration() - actual.duration();
1871        assert_eq!(
1872            actual.resolution(),
1873            expected.resolution(),
1874            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1875            expected,
1876            format_duration(expected.duration()),
1877            actual,
1878            format_duration(actual.duration()),
1879            format_duration(slack)
1880        );
1881        assert_eq!(
1882            actual.ticks(),
1883            expected.ticks(),
1884            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1885            expected,
1886            format_duration(expected.duration()),
1887            actual,
1888            format_duration(actual.duration()),
1889            format_duration(slack)
1890        );
1891    }
1892
1893    #[derive(Debug)]
1894    enum FakeCmd {
1895        SetProperties {
1896            resolutions: Vec<zx::BootDuration>,
1897            max_ticks: i64,
1898            keep_alive: zx::EventPair,
1899            done: zx::Event,
1900        },
1901        Exit,
1902    }
1903
1904    use std::cell::RefCell;
1905    use std::rc::Rc;
1906
1907    // A fake that emulates some aspects of the hrtimer driver.
1908    //
1909    // Specifically, it can be configured with different resolutions, and will
1910    // panic if any waiting methods are called twice in succession without
1911    // canceling the timer in between.
1912    fn fake_hrtimer_connection(
1913        rcv: mpsc::Receiver<FakeCmd>,
1914    ) -> (ffhh::DeviceProxy, fasync::Task<()>) {
1915        debug!("fake_hrtimer_connection: entry.");
1916        let (hrtimer, mut stream) =
1917            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
1918        let task = fasync::Task::local(async move {
1919            let mut rcv = rcv.fuse();
1920            let timer_properties = Rc::new(RefCell::new(None));
1921            let wake_lease = Rc::new(RefCell::new(None));
1922
1923            // Set to true when the hardware timer is supposed to be running.
1924            // Hardware timer may not be reprogrammed without canceling it first,
1925            // make sure the tests fail the same way as production would.
1926            let timer_running = Rc::new(RefCell::new(false));
1927
1928            loop {
1929                let timer_properties = timer_properties.clone();
1930                let wake_lease = wake_lease.clone();
1931                select! {
1932                    cmd = rcv.next() => {
1933                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
1934                        match cmd {
1935                            Some(FakeCmd::Exit) => { break; }
1936                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
1937                                let mut timer_props = vec![];
1938                                for v in 0..10 {
1939                                    timer_props.push(ffhh::TimerProperties {
1940                                        supported_resolutions: Some(
1941                                            resolutions.iter()
1942                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
1943                                        max_ticks: Some(max_ticks.try_into().unwrap()),
1944                                        // The start_and_wait method is supported.
1945                                        supports_wait: Some(true),
1946                                        id: Some(v),
1947                                        ..Default::default()
1948                                        },
1949                                    );
1950                                }
1951                                *timer_properties.borrow_mut() = Some(timer_props);
1952                                *wake_lease.borrow_mut() = Some(keep_alive);
1953                                debug!("set timer properties to: {:?}", timer_properties);
1954                                signal(&done);
1955                            }
1956                            e => {
1957                                panic!("unrecognized command: {:?}", e);
1958                            }
1959                        }
1960                        // Set some responses if we have them.
1961                    },
1962                    event = stream.next() => {
1963                        debug!("fake_hrtimer_connection: event: {:?}", event);
1964                        if let Some(Ok(event)) = event {
1965                            match event {
1966                                ffhh::DeviceRequest::Start { responder, .. } => {
1967                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1968                                    *timer_running.borrow_mut() = true;
1969                                    responder.send(Ok(())).expect("");
1970                                }
1971                                ffhh::DeviceRequest::Stop { responder, .. } => {
1972                                    *timer_running.borrow_mut() = false;
1973                                    responder.send(Ok(())).expect("");
1974                                }
1975                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
1976                                    responder.send(Ok(1)).expect("");
1977                                }
1978                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
1979                                    responder.send(Ok(())).expect("");
1980                                }
1981                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
1982                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
1983                                    *timer_running.borrow_mut() = true;
1984                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
1985                                    let ticks: i64 = ticks.try_into().unwrap();
1986                                    let sleep_duration = zx::BootDuration::from_nanos(ticks * match resolution {
1987                                        ffhh::Resolution::Duration(e) => e,
1988                                        _ => {
1989                                            error!("resolution has an unexpected value");
1990                                            1
1991                                        }
1992                                    });
1993                                    let timer_running_clone = timer_running.clone();
1994                                    fasync::Task::local(async move {
1995                                        // Respond after the requested sleep time. In tests this will
1996                                        // be sleeping in fake time.
1997                                        fasync::Timer::new(sleep_duration).await;
1998                                        *timer_running_clone.borrow_mut() = false;
1999                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
2000
2001                                        // Signaling the setup event allows the client to proceed
2002                                        // with post-scheduling work.
2003                                        signal(&setup_event);
2004
2005                                    }).detach();
2006                                }
2007                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
2008                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2009                                    *timer_running.borrow_mut() = true;
2010                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
2011                                }
2012                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
2013                                    if (*timer_properties).borrow().is_none() {
2014                                        error!("timer_properties is empty, this is not what you want!");
2015                                    }
2016                                    responder
2017                                        .send(ffhh::Properties {
2018                                            timers_properties: (*timer_properties).borrow().clone(),
2019                                            ..Default::default()
2020                                        })
2021                                        .expect("");
2022                                }
2023                                ffhh::DeviceRequest::ReadTimer { responder, .. } => {
2024                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2025                                }
2026                                ffhh::DeviceRequest::ReadClock { responder, .. } => {
2027                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2028                                }
2029                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
2030                            }
2031                        }
2032                    },
2033                }
2034            }
2035            debug!("fake_hrtimer_connection: exiting");
2036        });
2037        (hrtimer, task)
2038    }
2039
2040    #[fuchsia::test]
2041    fn test_basic_timed_wait() {
2042        let deadline = zx::BootInstant::from_nanos(100);
2043        let test_duration = zx::MonotonicDuration::from_nanos(110);
2044        run_in_fake_time_and_test_context(test_duration, |wake_proxy, _| async move {
2045            let setup_done = zx::Event::create();
2046            let duplicate = clone_handle(&setup_done);
2047
2048            wake_proxy
2049                .set_and_wait(
2050                    deadline.into(),
2051                    fta::SetAndWaitMode::NotifySetupDone(setup_done),
2052                    "Hello".into(),
2053                )
2054                .await
2055                .unwrap()
2056                .unwrap();
2057
2058            assert_gt!(fasync::BootInstant::now().into_nanos(), deadline.into_nanos());
2059
2060            // Ideally this would verify that the event was signaled as soon as the alarm was
2061            // scheduled and before the alarm fired; at minimum it verifies that it was signaled.
2062            let signals = duplicate
2063                .wait_handle(zx::Signals::EVENT_SIGNALED, zx::MonotonicInstant::INFINITE_PAST)
2064                .unwrap();
2065            assert_eq!(signals, zx::Signals::EVENT_SIGNALED);
2066        });
2067    }
2068
2069    #[test_case(
2070        zx::BootInstant::from_nanos(100),
2071        zx::BootInstant::from_nanos(200),
2072        zx::MonotonicDuration::from_nanos(250) ;
2073        "Two timers: one at 100 and another at 200 ns"
2074    )]
2075    #[test_case(
2076        zx::BootInstant::from_nanos(100),
2077        zx::BootInstant::from_nanos(100),
2078        // A tight end-of-test deadline will detect a stuck timer.
2079        zx::MonotonicDuration::from_nanos(104) ;
2080        "Two timers at the same deadline."
2081    )]
2082    #[test_case(
2083        zx::BootInstant::from_nanos(-1),
2084        zx::BootInstant::from_nanos(-1),
2085        zx::MonotonicDuration::from_nanos(30) ;
2086        "Two timers expire immediately."
2087    )]
2088    #[fuchsia::test]
2089    fn test_timed_wait_two_timers_params(
2090        // One timer scheduled at this instant (fake time starts from zero).
2091        first_deadline: zx::BootInstant,
2092        // Another timer scheduled at this instant.
2093        second_deadline: zx::BootInstant,
2094        // Run the fake time for this long.
2095        duration: zx::MonotonicDuration,
2096    ) {
2097        run_in_fake_time_and_test_context(duration, |wake_proxy, _| async move {
2098            let lease1 = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2099            let fut1 = wake_proxy.set_and_wait(first_deadline.into(), lease1, "Hello1".into());
2100
2101            let lease2 = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2102            let fut2 = wake_proxy.set_and_wait(second_deadline.into(), lease2, "Hello2".into());
2103
2104            let (result1, result2) = futures::join!(fut1, fut2);
2105
2106            result1.unwrap().unwrap();
2107            result2.unwrap().unwrap();
2108
2109            assert_gt!(fasync::BootInstant::now().into_nanos(), first_deadline.into_nanos());
2110            assert_gt!(fasync::BootInstant::now().into_nanos(), second_deadline.into_nanos());
2111        });
2112    }
2113
2114    #[test_case(
2115        zx::BootInstant::from_nanos(100),
2116        zx::BootInstant::from_nanos(200),
2117        zx::MonotonicDuration::from_nanos(250) ;
2118        "Reschedule with push-out"
2119    )]
2120    #[test_case(
2121        zx::BootInstant::from_nanos(100),
2122        zx::BootInstant::from_nanos(100),
2123        // A tight end-of-test deadline will detect a stuck timer.
2124        zx::MonotonicDuration::from_nanos(104) ;
2125        "Reschedule with same deadline"
2126    )]
2127    #[test_case(
2128        zx::BootInstant::from_nanos(200),
2129        zx::BootInstant::from_nanos(100),
2130        // A tight end-of-test deadline will detect a stuck timer.
2131        zx::MonotonicDuration::from_nanos(240) ;
2132        "Pull in"
2133    )]
2134    #[fuchsia::test]
2135    fn test_timed_wait_same_timer(
2136        // One timer scheduled at this instant (fake time starts from zero).
2137        first_deadline: zx::BootInstant,
2138        // Another timer scheduled at this instant.
2139        second_deadline: zx::BootInstant,
2140        // Run the fake time for this long.
2141        duration: zx::MonotonicDuration,
2142    ) {
2143        run_in_fake_time_and_test_context(duration, |wake_proxy, _| async move {
2144            let lease1 = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2145
2146            wake_proxy
2147                .set_and_wait(first_deadline.into(), lease1, "Hello".into())
2148                .await
2149                .unwrap()
2150                .unwrap();
2151            let lease2 = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2152            wake_proxy
2153                .set_and_wait(second_deadline.into(), lease2, "Hello2".into())
2154                .await
2155                .unwrap()
2156                .unwrap();
2157        });
2158    }
2159
2160    // Test what happens when we schedule a timer, then change our mind and
2161    // reschedule the same timer, but with a sooner deadline.
2162    #[fuchsia::test]
2163    fn test_reschedule_pull_in() {
2164        const LONG_DEADLINE_NANOS: i64 = 200;
2165        const SHORT_DEADLINE_NANOS: i64 = 100;
2166        const ALARM_ID: &str = "Hello";
2167        run_in_fake_time_and_test_context(
2168            zx::MonotonicDuration::from_nanos(LONG_DEADLINE_NANOS + 50),
2169            |wake_proxy, _| async move {
2170                let wake_proxy = Rc::new(RefCell::new(wake_proxy));
2171
2172                let keep_alive = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2173
2174                let (mut sync_send, mut sync_recv) = mpsc::channel(1);
2175
2176                // Schedule the timer with a long timeout first. Let it wait, then
2177                // reschedule the same timer with a sooner deadline.
2178                let wake_proxy_clone = wake_proxy.clone();
2179                let long_deadline_fut = async move {
2180                    let wake_fut = wake_proxy_clone.borrow().set_and_wait(
2181                        zx::BootInstant::from_nanos(LONG_DEADLINE_NANOS).into(),
2182                        keep_alive,
2183                        ALARM_ID.into(),
2184                    );
2185                    // Allow the rest of the test to proceed from here.
2186                    sync_send.send(()).await.unwrap();
2187
2188                    // Yield-wait for the first scheduled timer.
2189                    wake_fut.await.unwrap().unwrap();
2190                };
2191
2192                // Reschedule the same timer as above, but with a sooner deadline.
2193                // The short deadline should fire before the original long deadline
2194                // would have.
2195                let short_deadline_fut = async move {
2196                    // Wait until we know that the long deadline timer has been scheduled.
2197                    let _ = sync_recv.next().await;
2198
2199                    let keep_alive2 = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2200                    let _ = wake_proxy
2201                        .borrow()
2202                        .set_and_wait(
2203                            zx::BootInstant::from_nanos(SHORT_DEADLINE_NANOS).into(),
2204                            keep_alive2,
2205                            ALARM_ID.into(),
2206                        )
2207                        .await
2208                        .unwrap()
2209                        .unwrap();
2210
2211                    // We get here presumably after the "short" deadline expires, verify that:
2212                    assert_gt!(fasync::BootInstant::now().into_nanos(), SHORT_DEADLINE_NANOS);
2213                    assert_lt!(fasync::BootInstant::now().into_nanos(), LONG_DEADLINE_NANOS);
2214                };
2215                futures::join!(short_deadline_fut, long_deadline_fut);
2216            },
2217        );
2218    }
2219
2220    // Test what happens when we schedule a timer, then change our mind and
2221    // reschedule the same timer, but with a later deadline.
2222    #[fuchsia::test]
2223    fn test_reschedule_push_out() {
2224        const LONG_DEADLINE_NANOS: i64 = 200;
2225        const SHORT_DEADLINE_NANOS: i64 = 100;
2226        const ALARM_ID: &str = "Hello";
2227        run_in_fake_time_and_test_context(
2228            zx::MonotonicDuration::from_nanos(LONG_DEADLINE_NANOS + 50),
2229            |wake_proxy, inspector| async move {
2230                let wake_proxy = Rc::new(RefCell::new(wake_proxy));
2231
2232                let keep_alive = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2233
2234                let (mut sync_send, mut sync_recv) = mpsc::channel(1);
2235
2236                // Schedule the timer with a short timeout first. Let it wait, then
2237                // reschedule the same timer with a later deadline.
2238                let wake_proxy_clone = wake_proxy.clone();
2239                let short_deadline_fut = async move {
2240                    let wake_fut = wake_proxy_clone.borrow().set_and_wait(
2241                        zx::BootInstant::from_nanos(SHORT_DEADLINE_NANOS).into(),
2242                        keep_alive,
2243                        ALARM_ID.into(),
2244                    );
2245                    // Allow the rest of the test to proceed from here.
2246                    sync_send.send(()).await.unwrap();
2247
2248                    // Yield-wait for the first scheduled timer.
2249                    let result = wake_fut.await.unwrap();
2250                    assert_eq!(
2251                        result,
2252                        Err(fta::WakeAlarmsError::Dropped),
2253                        "expected wake alarm to be dropped"
2254                    );
2255                    assert_gt!(fasync::BootInstant::now().into_nanos(), SHORT_DEADLINE_NANOS);
2256                };
2257
2258                // Reschedule the same timer as above, but with a later deadline.
2259                // The earlier (short) alarm should be dropped, and this later alarm
2260                // should be the one that fires.
2261                let long_deadline_fut = async move {
2262                    // Wait until we know that the other deadline timer has been scheduled.
2263                    let _ = sync_recv.next().await;
2264
2265                    let keep_alive2 = fta::SetAndWaitMode::KeepAlive(create_fake_wake_lease());
2266                    let _ = wake_proxy
2267                        .borrow()
2268                        .set_and_wait(
2269                            zx::BootInstant::from_nanos(LONG_DEADLINE_NANOS).into(),
2270                            keep_alive2,
2271                            ALARM_ID.into(),
2272                        )
2273                        .await
2274                        .unwrap()
2275                        .unwrap();
2276
2277                    // Both the short and the long deadline expire.
2278                    assert_gt!(fasync::BootInstant::now().into_nanos(), LONG_DEADLINE_NANOS);
2279                };
2280                futures::join!(long_deadline_fut, short_deadline_fut);
2281
2282                // The values in the inspector tree are fixed because the test
2283                // runs fully deterministically in fake time.
2284                assert_data_tree!(inspector, root: {
2285                    test: {
2286                        hardware: {
2287                            // All alarms fired, so this should be "none".
2288                            current_deadline: "(none)",
2289                            remaining_until_alarm: "(none)",
2290                        },
2291                        now_formatted: "247ns (247)",
2292                        now_ns: 247i64,
2293                        pending_timers: "\n\t",
2294                        pending_timers_count: 0u64,
2295                        requested_deadlines_ns: AnyProperty,
2296                        schedule_delay_ns: AnyProperty,
2297                        slack_ns: AnyProperty,
2298                    },
2299                });
2300            },
2301        );
2302    }
2303}