// shutdown_shim/shutdown_watcher.rs

1// Copyright 2020 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::reboot_reasons::RebootReasons;
6
7use either::Either;
8use fidl::endpoints::Proxy;
9use fidl_fuchsia_hardware_power_statecontrol as fpower;
10use fuchsia_async::{self as fasync, DurationExt, TimeoutExt};
11use fuchsia_inspect::{self as inspect, NumericProperty};
12use futures::lock::Mutex;
13use futures::prelude::*;
14use futures::TryStreamExt;
15use std::collections::HashMap;
16use std::sync::Arc;
17use zx::AsHandleRef;
18
19/// Summary: Provides an implementation of the
20/// fuchsia.hardware.power.statecontrol.RebootMethodsWatcherRegister protocol that allows other
21/// components in the system to register to be notified of pending system shutdown requests and the
22/// associated shutdown reason.
23///
24/// FIDL dependencies:
///     - fuchsia.hardware.power.statecontrol.RebootMethodsWatcherRegister: the server provides a
26///       Register API that other components in the system can use
27///       to receive notifications about system shutdown events and reasons
28///     - fuchsia.hardware.power.statecontrol.RebootMethodsWatcher: the server receives an instance
29///       of this protocol over the RebootMethodsWatcherRegister channel, and uses this channel to
30///       send shutdown notifications
pub struct ShutdownWatcher {
    /// Contains all the registered RebootMethodsWatcher channels to be notified when a reboot
    /// request is received.
    ///
    /// Keyed by the raw handle value of each watcher's channel, which uniquely identifies the
    /// connection (see `add_reboot_watcher`). Entries are removed by a background task when the
    /// client closes its end, or by `notify_reboot_watchers` when a watcher times out.
    reboot_watchers: Arc<
        Mutex<
            // TODO(https://fxbug.dev/385742868): The value is the proxy (either
            // deprecated or not). Clean this up once the deprecated version is
            // removed from the API.
            HashMap<u32, Either<fpower::RebootMethodsWatcherProxy, fpower::RebootWatcherProxy>>,
        >,
    >,
    // Inspect counters tracking current/total watcher connections.
    inspect: Arc<Mutex<InspectData>>,
}
44
45impl ShutdownWatcher {
    /// Maximum time to wait for each watcher to acknowledge an `OnReboot` notification;
    /// watchers that do not respond within this window are dropped (see
    /// `notify_reboot_watchers`). Bounded by the FIDL protocol constant.
    const NOTIFY_RESPONSE_TIMEOUT: zx::MonotonicDuration = zx::MonotonicDuration::from_seconds(
        fpower::MAX_REBOOT_WATCHER_RESPONSE_TIME_SECONDS as i64,
    );
49
50    #[cfg(test)]
51    fn new() -> Arc<Self> {
52        let inspector = inspect::Inspector::new(fuchsia_inspect::InspectorConfig::default());
53        Self::new_with_inspector(&inspector)
54    }
55
56    pub fn new_with_inspector(inspector: &inspect::Inspector) -> Arc<Self> {
57        Arc::new(Self {
58            reboot_watchers: Arc::new(Mutex::new(HashMap::new())),
59            inspect: Arc::new(Mutex::new(InspectData::new(
60                inspector.root(),
61                "ShutdownWatcher".to_string(),
62            ))),
63        })
64    }
65
66    /// Handles a new client connection to the RebootMethodsWatcherRegister service.
67    pub async fn handle_reboot_register_request(
68        self: Arc<Self>,
69        mut stream: fpower::RebootMethodsWatcherRegisterRequestStream,
70    ) {
71        while let Ok(Some(req)) = stream.try_next().await {
72            match req {
73                // TODO(https://fxbug.dev/385742868): Delete this method
74                // once it's removed from the API.
75                fpower::RebootMethodsWatcherRegisterRequest::Register {
76                    watcher,
77                    control_handle: _,
78                } => {
79                    self.add_deprecated_reboot_watcher(watcher.into_proxy()).await;
80                }
81                // TODO(https://fxbug.dev/385742868): Delete this method
82                // once it's removed from the API.
83                fpower::RebootMethodsWatcherRegisterRequest::RegisterWithAck {
84                    watcher,
85                    responder,
86                } => {
87                    self.add_deprecated_reboot_watcher(watcher.into_proxy()).await;
88                    let _ = responder.send();
89                }
90                fpower::RebootMethodsWatcherRegisterRequest::RegisterWatcher {
91                    watcher,
92                    responder,
93                } => {
94                    self.add_reboot_watcher(watcher.into_proxy()).await;
95                    let _ = responder.send();
96                }
97            }
98        }
99    }
100
101    /// Adds a new RebootMethodsWatcher channel to the list of registered watchers.
102    async fn add_deprecated_reboot_watcher(&self, watcher: fpower::RebootMethodsWatcherProxy) {
103        fuchsia_trace::duration!(
104            c"shutdown-shim",
105            c"ShutdownWatcher::add_deprecated_reboot_watcher",
106            "watcher" => watcher.as_channel().raw_handle()
107        );
108
109        println!("[shutdown-shim] Adding a deprecated reboot watcher");
110        // If the client closes the watcher channel, remove it from our `reboot_watchers` map and
111        // notify all clients
112        let key = watcher.as_channel().raw_handle();
113        let proxy = watcher.clone();
114        let reboot_watchers = self.reboot_watchers.clone();
115        let inspect = self.inspect.clone();
116        fasync::Task::spawn(async move {
117            let _ = proxy.on_closed().await;
118            {
119                reboot_watchers.lock().await.remove(&key);
120            }
121            inspect.lock().await.remove_reboot_watcher();
122        })
123        .detach();
124
125        {
126            let mut watchers_mut = self.reboot_watchers.lock().await;
127            watchers_mut.insert(key, Either::Left(watcher));
128        }
129        self.inspect.lock().await.add_reboot_watcher();
130    }
131
132    /// Adds a new RebootMethodsWatcher channel to the list of registered watchers.
133    async fn add_reboot_watcher(&self, watcher: fpower::RebootWatcherProxy) {
134        fuchsia_trace::duration!(
135            c"shutdown-shim",
136            c"ShutdownWatcher::add_reboot_watcher",
137            "watcher" => watcher.as_channel().raw_handle()
138        );
139
140        // If the client closes the watcher channel, remove it from our `reboot_watchers` map
141        println!("[shutdown-shim] Adding a reboot watcher");
142        let key = watcher.as_channel().raw_handle();
143        let proxy = watcher.clone();
144        let reboot_watchers = self.reboot_watchers.clone();
145        let inspect = self.inspect.clone();
146        fasync::Task::spawn(async move {
147            let _ = proxy.on_closed().await;
148            {
149                reboot_watchers.lock().await.remove(&key);
150            }
151            inspect.lock().await.remove_reboot_watcher();
152        })
153        .detach();
154
155        {
156            let mut watchers_mut = self.reboot_watchers.lock().await;
157            watchers_mut.insert(key, Either::Right(watcher));
158        }
159        self.inspect.lock().await.add_reboot_watcher();
160    }
161
162    /// Handles the SystemShutdown message by notifying the appropriate registered watchers.
163    pub async fn handle_system_shutdown_message(&self, reasons: RebootReasons) {
164        self.notify_reboot_watchers(reasons, Self::NOTIFY_RESPONSE_TIMEOUT).await
165    }
166
    /// Notifies all registered reboot watchers that a reboot is pending for the given `reasons`.
    ///
    /// Each watcher is given up to `timeout` to acknowledge the notification. Watchers that
    /// respond in time stay registered; watchers that time out (or whose channel errors) are
    /// dropped from `reboot_watchers`, which closes their channel.
    async fn notify_reboot_watchers(&self, reasons: RebootReasons, timeout: zx::MonotonicDuration) {
        // Instead of waiting for https://fxbug.dev/42120903, use begin and end pair of macros.
        fuchsia_trace::duration_begin!(
            c"shutdown-shim",
            c"ShutdownWatcher::notify_reboot_watchers",
            "reasons" => format!("{:?}", reasons).as_str()
        );

        // Create a future for each watcher that calls the watcher's `on_reboot` method and returns
        // the watcher proxy if the response was received within the timeout, or None otherwise. We
        // take this approach so that watchers that timed out have their channel dropped
        // (https://fxbug.dev/42131208).
        let watcher_futures = {
            // Take the current watchers out of the RefCell because we'll be modifying the vector
            let watchers = self.reboot_watchers.lock().await;
            println!("[shutdown-shim] notifying {:?} watchers of reboot", watchers.len());
            // Clone the map so the futures own their proxies and the lock guard can be
            // dropped at the end of this block, before the notifications run.
            watchers.clone().into_iter().map(|(key, watcher_proxy)| {
                let reasons = reasons.clone();
                async move {
                    let deadline = timeout.after_now();
                    // Deprecated watchers get the legacy reason type; current watchers get
                    // `RebootOptions` converted from `reasons`. `future::Either` unifies the
                    // two distinct future types into one awaitable.
                    let result = match &watcher_proxy {
                        Either::Left(proxy) => {
                            futures::future::Either::Left(proxy.on_reboot(reasons.to_deprecated()))
                        }
                        Either::Right(proxy) => {
                            futures::future::Either::Right(proxy.on_reboot(&reasons.into()))
                        }
                    }
                    .map_err(|_| ())
                    .on_timeout(deadline, || Err(()))
                    .await;

                    match result {
                        Ok(()) => Some((key, watcher_proxy)),
                        Err(()) => None,
                    }
                }
            })
        };

        // Run all of the futures, collecting the successful watcher proxies into a vector
        let new_watchers = futures::future::join_all(watcher_futures)
            .await
            .into_iter()
            .filter_map(|watcher_opt| watcher_opt) // Unwrap the Options while filtering out None
            .collect();

        // Repopulate the successful watcher proxies back into the `reboot_watchers` RefCell
        *self.reboot_watchers.lock().await = new_watchers;

        fuchsia_trace::duration_end!(
            c"shutdown-shim",
            c"ShutdownWatcher::notify_reboot_watchers",
            "reasons" => format!("{:?}", reasons).as_str()
        );
    }
223}
224
/// Inspect properties tracking reboot-watcher connection counts for diagnostics.
struct InspectData {
    // Number of watcher channels currently registered.
    reboot_watcher_current_connections: inspect::UintProperty,
    // Total number of watcher registrations since creation; never decremented.
    reboot_watcher_total_connections: inspect::UintProperty,
}
229
230impl InspectData {
231    fn new(parent: &inspect::Node, name: String) -> Self {
232        // Create a local root node and properties
233        let root = parent.create_child(name);
234        let reboot_watcher_current_connections =
235            root.create_uint("reboot_watcher_current_connections", 0);
236        let reboot_watcher_total_connections =
237            root.create_uint("reboot_watcher_total_connections", 0);
238
239        // Pass ownership of the new node to the parent node, otherwise it'll be dropped
240        parent.record(root);
241
242        InspectData { reboot_watcher_current_connections, reboot_watcher_total_connections }
243    }
244
245    fn add_reboot_watcher(&self) {
246        self.reboot_watcher_current_connections.add(1);
247        self.reboot_watcher_total_connections.add(1);
248    }
249
250    fn remove_reboot_watcher(&self) {
251        self.reboot_watcher_current_connections.subtract(1);
252    }
253}
254
255#[cfg(test)]
256mod tests {
257    use super::*;
258    use assert_matches::assert_matches;
259    use diagnostics_assertions::assert_data_tree;
260    use fidl::endpoints::{ControlHandle, RequestStream};
261
    // Converts float seconds to an integer-second duration, for simplicity in these tests.
263    fn seconds(seconds: f64) -> zx::MonotonicDuration {
264        zx::MonotonicDuration::from_seconds(seconds as i64)
265    }
266
    /// Tests for the presence and correctness of inspect data
    #[fuchsia::test]
    async fn test_inspect_data() {
        let inspector = inspect::Inspector::default();
        let registrar = ShutdownWatcher::new_with_inspector(&inspector);

        // No watchers registered yet: both counters start at zero.
        assert_data_tree!(
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 0u64,
                    reboot_watcher_total_connections: 0u64
                }
            }
        );

        // Registering one watcher bumps both counters to 1.
        let (watcher_proxy, s) = fidl::endpoints::create_proxy::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy.clone()).await;

        assert_data_tree!(
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 1u64,
                    reboot_watcher_total_connections: 1u64
                }
            }
        );

        // Close the server end: the background cleanup task should decrement the "current"
        // counter while "total" stays at 1. `@retry` because the cleanup runs asynchronously.
        drop(s);
        watcher_proxy.on_closed().await.expect("closed");

        assert_data_tree!(
            @retry 10,
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 0u64,
                    reboot_watcher_total_connections: 1u64
                }
            }
        );

        // A second registration: "current" returns to 1, "total" keeps accumulating.
        let (watcher_proxy, s) = fidl::endpoints::create_proxy::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy.clone()).await;

        assert_data_tree!(
            @retry 10,
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 1u64,
                    reboot_watcher_total_connections: 2u64
                }
            }
        );

        // Closing the second watcher: "current" drops again, "total" remains at 2.
        drop(s);
        watcher_proxy.on_closed().await.expect("closed");

        assert_data_tree!(
            @retry 10,
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 0u64,
                    reboot_watcher_total_connections: 2u64
                }
            }
        );
    }
337
    /// Tests that a client can successfully register a reboot watcher, and the registered watcher
    /// receives the expected reboot notification.
    #[fasync::run_singlethreaded(test)]
    async fn test_add_reboot_watcher() {
        let registrar = ShutdownWatcher::new();

        // Create the proxy/stream to register the watcher
        let (register_proxy, register_stream) = fidl::endpoints::create_proxy_and_stream::<
            fpower::RebootMethodsWatcherRegisterMarker,
        >();

        // Start the RebootMethodsWatcherRegister server that will handle Register calls from
        // register_proxy
        let registrar_clone = registrar.clone();
        fasync::Task::local(async move {
            registrar_clone.handle_reboot_register_request(register_stream).await;
        })
        .detach();

        // Create the watcher proxy/stream to receive reboot notifications
        let (watcher_client, mut watcher_stream) =
            fidl::endpoints::create_request_stream::<fpower::RebootWatcherMarker>();

        // Call the Register API, passing in the watcher_client end. Awaiting the ack
        // guarantees the watcher is stored before we notify below.
        assert_matches!(register_proxy.register_watcher(watcher_client).await, Ok(()));
        // Signal the watchers (zero timeout: the watcher never acks, but delivery of the
        // OnReboot request is all this test checks)
        registrar
            .notify_reboot_watchers(
                RebootReasons::new(fpower::RebootReason2::UserRequest),
                seconds(0.0),
            )
            .await;

        // Verify the watcher_stream gets the correct reboot notification
        let reasons = assert_matches!(
            watcher_stream.try_next().await.unwrap().unwrap(),
            fpower::RebootWatcherRequest::OnReboot {
                options: fpower::RebootOptions{reasons: Some(reasons), ..},
                ..
            } => reasons
        );
        assert_eq!(&reasons[..], [fpower::RebootReason2::UserRequest]);
    }
381
382    /// Tests that a reboot watcher is delivered the correct reboot reason
383    #[fasync::run_singlethreaded(test)]
384    async fn test_reboot_watcher_reason() {
385        let registrar = ShutdownWatcher::new();
386        let (watcher_proxy, mut watcher_stream) =
387            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
388        registrar.add_reboot_watcher(watcher_proxy).await;
389        registrar
390            .notify_reboot_watchers(
391                RebootReasons::new(fpower::RebootReason2::HighTemperature),
392                seconds(0.0),
393            )
394            .await;
395
396        let reasons = match watcher_stream.try_next().await {
397            Ok(Some(fpower::RebootWatcherRequest::OnReboot {
398                options: fpower::RebootOptions { reasons: Some(reasons), .. },
399                ..
400            })) => reasons,
401            e => panic!("Unexpected watcher_stream result: {:?}", e),
402        };
403
404        assert_eq!(&reasons[..], [fpower::RebootReason2::HighTemperature]);
405    }
406
    /// Tests that if there are multiple registered reboot watchers, each one will receive the
    /// expected reboot notification.
    #[fasync::run_singlethreaded(test)]
    async fn test_multiple_reboot_watchers() {
        let registrar = ShutdownWatcher::new();

        // Create three separate reboot watchers
        let (watcher_proxy1, mut watcher_stream1) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy1).await;

        let (watcher_proxy2, mut watcher_stream2) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy2).await;

        let (watcher_proxy3, mut watcher_stream3) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy3).await;

        // Close the channel of the first watcher to verify the registrar still correctly notifies the
        // second and third watchers
        watcher_stream1.control_handle().shutdown();

        registrar
            .notify_reboot_watchers(
                RebootReasons::new(fpower::RebootReason2::HighTemperature),
                seconds(0.0),
            )
            .await;

        // The first watcher should get None because its channel was closed
        match watcher_stream1.try_next().await {
            Ok(None) => {}
            e => panic!("Unexpected watcher_stream1 result: {:?}", e),
        };

        // Verify the second watcher received the correct OnReboot request
        match watcher_stream2.try_next().await {
            Ok(Some(fpower::RebootWatcherRequest::OnReboot {
                options: fpower::RebootOptions { reasons: Some(reasons), .. },
                ..
            })) => {
                assert_eq!(&reasons[..], [fpower::RebootReason2::HighTemperature])
            }
            e => panic!("Unexpected watcher_stream2 result: {:?}", e),
        };

        // Verify the third watcher received the correct OnReboot request
        match watcher_stream3.try_next().await {
            Ok(Some(fpower::RebootWatcherRequest::OnReboot {
                options: fpower::RebootOptions { reasons: Some(reasons), .. },
                ..
            })) => assert_eq!(&reasons[..], [fpower::RebootReason2::HighTemperature]),
            e => panic!("Unexpected watcher_stream3 result: {:?}", e),
        };
    }
463
    /// Tests that the notify future does not complete until the registered watcher acknowledges
    /// the OnReboot notification (i.e. a watcher can delay shutdown within its timeout).
    #[test]
    fn test_watcher_response_delay() {
        let mut exec = fasync::TestExecutor::new();
        let registrar = ShutdownWatcher::new();

        // Register the reboot watcher
        let (watcher_proxy, mut watcher_stream) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        let fut = async {
            registrar.add_reboot_watcher(watcher_proxy).await;
            assert_eq!(registrar.reboot_watchers.lock().await.len(), 1);
        };
        exec.run_singlethreaded(fut);

        // Set up the notify future with a 1-second response timeout
        let notify_future = registrar.notify_reboot_watchers(
            RebootReasons::new(fpower::RebootReason2::HighTemperature),
            seconds(1.0),
        );
        futures::pin_mut!(notify_future);

        // Verify that the notify future can't complete on the first attempt (because the watcher
        // will not have responded)
        assert!(exec.run_until_stalled(&mut notify_future).is_pending());

        // Ack the reboot notification, allowing the shutdown flow to continue
        let fpower::RebootWatcherRequest::OnReboot { responder, .. } =
            exec.run_singlethreaded(&mut watcher_stream.try_next()).unwrap().unwrap();
        assert_matches!(responder.send(), Ok(()));

        // Verify the notify future can now complete
        assert!(exec.run_until_stalled(&mut notify_future).is_ready());
    }
497
498    /// Tests that a reboot watcher is able to delay the shutdown but will time out after the
499    /// expected duration. The test also verifies that when a watcher times out, it is removed
500    /// from the list of registered reboot watchers.
501    #[test]
502    fn test_watcher_response_timeout() {
503        let mut exec = fasync::TestExecutor::new_with_fake_time();
504        let registrar = ShutdownWatcher::new();
505        exec.set_fake_time(fasync::MonotonicInstant::from_nanos(0));
506
507        // Register the reboot watcher
508        let (watcher_proxy, _watcher_stream) =
509            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
510        let fut = async {
511            registrar.add_reboot_watcher(watcher_proxy).await;
512            assert_eq!(registrar.reboot_watchers.lock().await.len(), 1);
513        };
514        futures::pin_mut!(fut);
515        exec.run_until_stalled(&mut fut).is_ready();
516
517        // Set up the notify future
518        let notify_future = registrar.notify_reboot_watchers(
519            RebootReasons::new(fpower::RebootReason2::HighTemperature),
520            seconds(1.0),
521        );
522        futures::pin_mut!(notify_future);
523
524        // Verify that the notify future can't complete on the first attempt (because the watcher
525        // will not have responded)
526        assert!(exec.run_until_stalled(&mut notify_future).is_pending());
527
528        // Wake the timer that causes the watcher timeout to fire
529        assert_eq!(exec.wake_next_timer(), Some(fasync::MonotonicInstant::from_nanos(1e9 as i64)));
530
531        // Verify the notify future can now complete
532        assert!(exec.run_until_stalled(&mut notify_future).is_ready());
533
534        // Since the watcher timed out, verify it is removed from `reboot_watchers`
535        let fut = async {
536            assert_eq!(registrar.reboot_watchers.lock().await.len(), 0);
537        };
538        futures::pin_mut!(fut);
539        exec.run_until_stalled(&mut fut).is_ready();
540    }
541
    /// Tests that an unsuccessful RebootWatcher registration results in the
    /// RebootMethodsWatcherRegister channel being closed.
    #[fasync::run_singlethreaded(test)]
    async fn test_watcher_register_fail() {
        let registrar = ShutdownWatcher::new();

        // Create the registration proxy/stream
        let (register_proxy, register_stream) = fidl::endpoints::create_proxy_and_stream::<
            fpower::RebootMethodsWatcherRegisterMarker,
        >();

        // Start the RebootMethodsWatcherRegister server that will handle Register requests from
        // `register_proxy`
        fasync::Task::local(async move {
            registrar.handle_reboot_register_request(register_stream).await;
        })
        .detach();

        // Send an invalid request to the server to force a failure: a zero-byte channel write
        // is not a valid FIDL message, so the server's stream yields an error and the handler
        // returns, dropping the stream.
        assert_matches!(register_proxy.as_channel().write(&[], &mut []), Ok(()));

        // Verify the RebootMethodsWatcherRegister channel is closed
        assert_matches!(register_proxy.on_closed().await, Ok(zx::Signals::CHANNEL_PEER_CLOSED));
    }
566}