shutdown_shim/shutdown_watcher.rs

// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::reboot_reasons::RebootReasons;

use either::Either;
use fidl::endpoints::Proxy;
use fidl_fuchsia_hardware_power_statecontrol as fpower;
use fuchsia_async::{self as fasync, DurationExt, TimeoutExt};
use fuchsia_inspect::{self as inspect, NumericProperty};
use futures::lock::Mutex;
use futures::prelude::*;
use futures::TryStreamExt;
use std::collections::HashMap;
use std::sync::Arc;
use zx::AsHandleRef;

/// Summary: Provides an implementation of the
/// fuchsia.hardware.power.statecontrol.RebootMethodsWatcherRegister protocol that allows other
/// components in the system to register to be notified of pending system shutdown requests and the
/// associated shutdown reason.
///
/// FIDL dependencies:
///     - fuchsia.hardware.power.statecontrol.RebootMethodsWatcherRegister: the server provides a
///       Register API that other components in the system can use
///       to receive notifications about system shutdown events and reasons
///     - fuchsia.hardware.power.statecontrol.RebootMethodsWatcher: the server receives an instance
///       of this protocol over the RebootMethodsWatcherRegister channel, and uses this channel to
///       send shutdown notifications
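///
/// # Example (client side)
///
/// A minimal sketch of how a client might register for notifications, assuming the standard
/// `fuchsia_component` client environment; names and error handling are illustrative only:
///
/// ```ignore
/// use fidl_fuchsia_hardware_power_statecontrol as fpower;
/// use fuchsia_component::client::connect_to_protocol;
/// use futures::TryStreamExt;
///
/// async fn watch_for_reboot() -> anyhow::Result<()> {
///     // Connect to the register protocol served by shutdown-shim.
///     let register = connect_to_protocol::<fpower::RebootMethodsWatcherRegisterMarker>()?;
///
///     // Hand the server the client end of a RebootWatcher channel.
///     let (watcher_client, mut watcher_stream) =
///         fidl::endpoints::create_request_stream::<fpower::RebootWatcherMarker>();
///     register.register_watcher(watcher_client).await?;
///
///     // Receive reboot notifications and ack them so the shutdown can proceed.
///     while let Some(fpower::RebootWatcherRequest::OnReboot { options, responder }) =
///         watcher_stream.try_next().await?
///     {
///         println!("reboot pending, reasons: {:?}", options.reasons);
///         responder.send()?;
///     }
///     Ok(())
/// }
/// ```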
pub struct ShutdownWatcher {
    /// Contains all the registered RebootMethodsWatcher channels to be notified when a reboot
    /// request is received.
    reboot_watchers: Arc<
        Mutex<
            // TODO(https://fxbug.dev/385742868): The value is the proxy (either
            // deprecated or not). Clean this up once the deprecated version is
            // removed from the API.
            HashMap<u32, Either<fpower::RebootMethodsWatcherProxy, fpower::RebootWatcherProxy>>,
        >,
    >,
    inspect: Arc<Mutex<InspectData>>,
}

impl ShutdownWatcher {
    const NOTIFY_RESPONSE_TIMEOUT: zx::MonotonicDuration = zx::MonotonicDuration::from_seconds(
        fpower::MAX_REBOOT_WATCHER_RESPONSE_TIME_SECONDS as i64,
    );

    #[cfg(test)]
    fn new() -> Arc<Self> {
        let inspector = inspect::Inspector::new(fuchsia_inspect::InspectorConfig::default());
        Self::new_with_inspector(&inspector)
    }

    pub fn new_with_inspector(inspector: &inspect::Inspector) -> Arc<Self> {
        Arc::new(Self {
            reboot_watchers: Arc::new(Mutex::new(HashMap::new())),
            inspect: Arc::new(Mutex::new(InspectData::new(
                inspector.root(),
                "ShutdownWatcher".to_string(),
            ))),
        })
    }

    /// Handles a new client connection to the RebootMethodsWatcherRegister service.
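    ///
    /// A minimal sketch of how a caller might feed request streams into this handler, assuming
    /// the usual `ServiceFs` setup in the component's `main`; the surrounding names here are
    /// illustrative, not this module's actual wiring:
    ///
    /// ```ignore
    /// let watcher = ShutdownWatcher::new_with_inspector(&inspector);
    /// let mut fs = fuchsia_component::server::ServiceFs::new();
    /// // Expose the register protocol and yield each incoming request stream as-is.
    /// fs.dir("svc").add_fidl_service(
    ///     |stream: fpower::RebootMethodsWatcherRegisterRequestStream| stream,
    /// );
    /// fs.take_and_serve_directory_handle()?;
    /// // Serve every connection concurrently on the shared ShutdownWatcher.
    /// fs.for_each_concurrent(None, |stream| {
    ///     Arc::clone(&watcher).handle_reboot_register_request(stream)
    /// })
    /// .await;
    /// ```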
    pub async fn handle_reboot_register_request(
        self: Arc<Self>,
        mut stream: fpower::RebootMethodsWatcherRegisterRequestStream,
    ) {
        while let Ok(Some(req)) = stream.try_next().await {
            match req {
                // TODO(https://fxbug.dev/385742868): Delete this method
                // once it's removed from the API.
                fpower::RebootMethodsWatcherRegisterRequest::Register {
                    watcher,
                    control_handle: _,
                } => {
                    self.add_deprecated_reboot_watcher(watcher.into_proxy()).await;
                }
                // TODO(https://fxbug.dev/385742868): Delete this method
                // once it's removed from the API.
                fpower::RebootMethodsWatcherRegisterRequest::RegisterWithAck {
                    watcher,
                    responder,
                } => {
                    self.add_deprecated_reboot_watcher(watcher.into_proxy()).await;
                    let _ = responder.send();
                }
                fpower::RebootMethodsWatcherRegisterRequest::RegisterWatcher {
                    watcher,
                    responder,
                } => {
                    self.add_reboot_watcher(watcher.into_proxy()).await;
                    let _ = responder.send();
                }
            }
        }
    }

    /// Adds a new RebootMethodsWatcher channel to the list of registered watchers.
    async fn add_deprecated_reboot_watcher(&self, watcher: fpower::RebootMethodsWatcherProxy) {
        fuchsia_trace::duration!(
            c"shutdown-shim",
            c"ShutdownWatcher::add_deprecated_reboot_watcher",
            "watcher" => watcher.as_channel().raw_handle()
        );

        println!("[shutdown-shim] Adding a deprecated reboot watcher");
        // If the client closes the watcher channel, remove it from our `reboot_watchers` map and
        // update the inspect connection count
        let key = watcher.as_channel().raw_handle();
        let proxy = watcher.clone();
        let reboot_watchers = self.reboot_watchers.clone();
        let inspect = self.inspect.clone();
        fasync::Task::spawn(async move {
            let _ = proxy.on_closed().await;
            {
                reboot_watchers.lock().await.remove(&key);
            }
            inspect.lock().await.remove_reboot_watcher();
        })
        .detach();

        {
            let mut watchers_mut = self.reboot_watchers.lock().await;
            watchers_mut.insert(key, Either::Left(watcher));
        }
        self.inspect.lock().await.add_reboot_watcher();
    }

    /// Adds a new RebootWatcher channel to the list of registered watchers.
    async fn add_reboot_watcher(&self, watcher: fpower::RebootWatcherProxy) {
        fuchsia_trace::duration!(
            c"shutdown-shim",
            c"ShutdownWatcher::add_reboot_watcher",
            "watcher" => watcher.as_channel().raw_handle()
        );

        // If the client closes the watcher channel, remove it from our `reboot_watchers` map
        println!("[shutdown-shim] Adding a reboot watcher");
        let key = watcher.as_channel().raw_handle();
        let proxy = watcher.clone();
        let reboot_watchers = self.reboot_watchers.clone();
        let inspect = self.inspect.clone();
        fasync::Task::spawn(async move {
            let _ = proxy.on_closed().await;
            {
                reboot_watchers.lock().await.remove(&key);
            }
            inspect.lock().await.remove_reboot_watcher();
        })
        .detach();

        {
            let mut watchers_mut = self.reboot_watchers.lock().await;
            watchers_mut.insert(key, Either::Right(watcher));
        }
        self.inspect.lock().await.add_reboot_watcher();
    }

    /// Handles the SystemShutdown message by notifying the appropriate registered watchers.
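    ///
    /// Illustrative call site (the variable name is a placeholder for however the caller holds
    /// its `Arc<ShutdownWatcher>`):
    ///
    /// ```ignore
    /// // Give registered watchers a chance to ack (or time out) before power-off proceeds.
    /// shutdown_watcher
    ///     .handle_system_shutdown_message(RebootReasons::new(fpower::RebootReason2::UserRequest))
    ///     .await;
    /// ```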
    pub async fn handle_system_shutdown_message(&self, reasons: RebootReasons) {
        self.notify_reboot_watchers(reasons, Self::NOTIFY_RESPONSE_TIMEOUT).await
    }

    async fn notify_reboot_watchers(&self, reasons: RebootReasons, timeout: zx::MonotonicDuration) {
        // Use a begin/end pair of trace macros rather than waiting for https://fxbug.dev/42120903.
        fuchsia_trace::duration_begin!(
            c"shutdown-shim",
            c"ShutdownWatcher::notify_reboot_watchers",
            "reasons" => format!("{:?}", reasons).as_str()
        );

        // Create a future for each watcher that calls the watcher's `on_reboot` method and returns
        // the watcher proxy if the response was received within the timeout, or None otherwise. We
        // take this approach so that watchers that timed out have their channel dropped
        // (https://fxbug.dev/42131208).
        let watcher_futures = {
            // Clone the current watchers out of the mutex-guarded map because we'll be rebuilding
            // the map from the responsive watchers below
            let watchers = self.reboot_watchers.lock().await;
            println!("[shutdown-shim] notifying {:?} watchers of reboot", watchers.len());
            watchers.clone().into_iter().map(|(key, watcher_proxy)| {
                let reasons = reasons.clone();
                async move {
                    let deadline = timeout.after_now();
                    let result = match &watcher_proxy {
                        Either::Left(proxy) => {
                            futures::future::Either::Left(proxy.on_reboot(reasons.to_deprecated()))
                        }
                        Either::Right(proxy) => {
                            futures::future::Either::Right(proxy.on_reboot(&reasons.into()))
                        }
                    }
                    .map_err(|_| ())
                    .on_timeout(deadline, || Err(()))
                    .await;

                    match result {
                        Ok(()) => Some((key, watcher_proxy)),
                        Err(()) => None,
                    }
                }
            })
        };

        // Run all of the futures, collecting the successful watcher proxies into a vector
        let new_watchers = futures::future::join_all(watcher_futures)
            .await
            .into_iter()
            .filter_map(|watcher_opt| watcher_opt) // Unwrap the Options while filtering out None
            .collect();

        // Repopulate the successful watcher proxies back into the `reboot_watchers` map
        *self.reboot_watchers.lock().await = new_watchers;

        fuchsia_trace::duration_end!(
            c"shutdown-shim",
            c"ShutdownWatcher::notify_reboot_watchers",
            "reasons" => format!("{:?}", reasons).as_str()
        );
    }
}

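// The resulting inspect hierarchy looks roughly like this (mirroring the assertions in
// `test_inspect_data` below):
//
//     root:
//       ShutdownWatcher:
//         reboot_watcher_current_connections: <currently connected watchers>
//         reboot_watcher_total_connections: <total watchers ever registered>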
struct InspectData {
    reboot_watcher_current_connections: inspect::UintProperty,
    reboot_watcher_total_connections: inspect::UintProperty,
}

impl InspectData {
    fn new(parent: &inspect::Node, name: String) -> Self {
        // Create a local root node and properties
        let root = parent.create_child(name);
        let reboot_watcher_current_connections =
            root.create_uint("reboot_watcher_current_connections", 0);
        let reboot_watcher_total_connections =
            root.create_uint("reboot_watcher_total_connections", 0);

        // Pass ownership of the new node to the parent node, otherwise it'll be dropped
        parent.record(root);

        InspectData { reboot_watcher_current_connections, reboot_watcher_total_connections }
    }

    fn add_reboot_watcher(&self) {
        self.reboot_watcher_current_connections.add(1);
        self.reboot_watcher_total_connections.add(1);
    }

    fn remove_reboot_watcher(&self) {
        self.reboot_watcher_current_connections.subtract(1);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use diagnostics_assertions::assert_data_tree;
    use fidl::endpoints::{ControlHandle, RequestStream};
    // Test helper that truncates a float number of seconds into a MonotonicDuration
    fn seconds(seconds: f64) -> zx::MonotonicDuration {
        zx::MonotonicDuration::from_seconds(seconds as i64)
    }

    fn run_all_tasks_until_stalled(executor: &mut fuchsia_async::TestExecutor) {
        assert!(executor.run_until_stalled(&mut futures::future::pending::<()>()).is_pending());
    }

    /// Tests for the presence and correctness of inspect data
    #[test]
    fn test_inspect_data() {
        let mut exec = fasync::TestExecutor::new();
        let inspector = inspect::Inspector::default();
        let registrar = ShutdownWatcher::new_with_inspector(&inspector);

        assert_data_tree!(
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 0u64,
                    reboot_watcher_total_connections: 0u64
                }
            }
        );

        let (watcher_proxy, _) = fidl::endpoints::create_proxy::<fpower::RebootWatcherMarker>();
        let fut = async {
            registrar.add_reboot_watcher(watcher_proxy.clone()).await;
        };
        futures::pin_mut!(fut);
        assert!(exec.run_until_stalled(&mut fut).is_ready());

        assert_data_tree!(
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 1u64,
                    reboot_watcher_total_connections: 1u64
                }
            }
        );

        drop(fut);
        run_all_tasks_until_stalled(&mut exec);

        assert_data_tree!(
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 0u64,
                    reboot_watcher_total_connections: 1u64
                }
            }
        );

        let (watcher_proxy, _) = fidl::endpoints::create_proxy::<fpower::RebootWatcherMarker>();
        let fut = async {
            registrar.add_reboot_watcher(watcher_proxy.clone()).await;
        };
        futures::pin_mut!(fut);
        assert!(exec.run_until_stalled(&mut fut).is_ready());

        assert_data_tree!(
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 1u64,
                    reboot_watcher_total_connections: 2u64
                }
            }
        );

        drop(fut);
        run_all_tasks_until_stalled(&mut exec);

        assert_data_tree!(
            inspector,
            root: {
                ShutdownWatcher: {
                    reboot_watcher_current_connections: 0u64,
                    reboot_watcher_total_connections: 2u64
                }
            }
        );
    }

    /// Tests that a client can successfully register a reboot watcher, and the registered watcher
    /// receives the expected reboot notification.
    #[fasync::run_singlethreaded(test)]
    async fn test_add_reboot_watcher() {
        let registrar = ShutdownWatcher::new();

        // Create the proxy/stream to register the watcher
        let (register_proxy, register_stream) = fidl::endpoints::create_proxy_and_stream::<
            fpower::RebootMethodsWatcherRegisterMarker,
        >();

        // Start the RebootMethodsWatcherRegister server that will handle Register calls from
        // register_proxy
        let registrar_clone = registrar.clone();
        fasync::Task::local(async move {
            registrar_clone.handle_reboot_register_request(register_stream).await;
        })
        .detach();

        // Create the watcher proxy/stream to receive reboot notifications
        let (watcher_client, mut watcher_stream) =
            fidl::endpoints::create_request_stream::<fpower::RebootWatcherMarker>();

        // Call the Register API, passing in the watcher_client end
        assert_matches!(register_proxy.register_watcher(watcher_client).await, Ok(()));
        // Signal the watchers
        registrar
            .notify_reboot_watchers(
                RebootReasons::new(fpower::RebootReason2::UserRequest),
                seconds(0.0),
            )
            .await;

        // Verify the watcher_stream gets the correct reboot notification
        let reasons = assert_matches!(
            watcher_stream.try_next().await.unwrap().unwrap(),
            fpower::RebootWatcherRequest::OnReboot {
                options: fpower::RebootOptions{reasons: Some(reasons), ..},
                ..
            } => reasons
        );
        assert_eq!(&reasons[..], [fpower::RebootReason2::UserRequest]);
    }

    /// Tests that a reboot watcher is delivered the correct reboot reason
    #[fasync::run_singlethreaded(test)]
    async fn test_reboot_watcher_reason() {
        let registrar = ShutdownWatcher::new();
        let (watcher_proxy, mut watcher_stream) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy).await;
        registrar
            .notify_reboot_watchers(
                RebootReasons::new(fpower::RebootReason2::HighTemperature),
                seconds(0.0),
            )
            .await;

        let reasons = match watcher_stream.try_next().await {
            Ok(Some(fpower::RebootWatcherRequest::OnReboot {
                options: fpower::RebootOptions { reasons: Some(reasons), .. },
                ..
            })) => reasons,
            e => panic!("Unexpected watcher_stream result: {:?}", e),
        };

        assert_eq!(&reasons[..], [fpower::RebootReason2::HighTemperature]);
    }

    /// Tests that if there are multiple registered reboot watchers, each one will receive the
    /// expected reboot notification.
    #[fasync::run_singlethreaded(test)]
    async fn test_multiple_reboot_watchers() {
        let registrar = ShutdownWatcher::new();

        // Create three separate reboot watchers
        let (watcher_proxy1, mut watcher_stream1) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy1).await;

        let (watcher_proxy2, mut watcher_stream2) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy2).await;

        let (watcher_proxy3, mut watcher_stream3) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        registrar.add_reboot_watcher(watcher_proxy3).await;

        // Close the channel of the first watcher to verify the registrar still correctly notifies
        // the second and third watchers
        watcher_stream1.control_handle().shutdown();

        registrar
            .notify_reboot_watchers(
                RebootReasons::new(fpower::RebootReason2::HighTemperature),
                seconds(0.0),
            )
            .await;

        // The first watcher should get None because its channel was closed
        match watcher_stream1.try_next().await {
            Ok(None) => {}
            e => panic!("Unexpected watcher_stream1 result: {:?}", e),
        };

        // Verify the second watcher received the correct OnReboot request
        match watcher_stream2.try_next().await {
            Ok(Some(fpower::RebootWatcherRequest::OnReboot {
                options: fpower::RebootOptions { reasons: Some(reasons), .. },
                ..
            })) => {
                assert_eq!(&reasons[..], [fpower::RebootReason2::HighTemperature])
            }
            e => panic!("Unexpected watcher_stream2 result: {:?}", e),
        };

        // Verify the third watcher received the correct OnReboot request
        match watcher_stream3.try_next().await {
            Ok(Some(fpower::RebootWatcherRequest::OnReboot {
                options: fpower::RebootOptions { reasons: Some(reasons), .. },
                ..
            })) => assert_eq!(&reasons[..], [fpower::RebootReason2::HighTemperature]),
            e => panic!("Unexpected watcher_stream3 result: {:?}", e),
        };
    }

    /// Tests that a reboot watcher can delay shutdown by deferring its response, and that the
    /// shutdown flow proceeds once the watcher acks the notification.
    #[test]
    fn test_watcher_response_delay() {
        let mut exec = fasync::TestExecutor::new();
        let registrar = ShutdownWatcher::new();

        // Register the reboot watcher
        let (watcher_proxy, mut watcher_stream) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        let fut = async {
            registrar.add_reboot_watcher(watcher_proxy).await;
            assert_eq!(registrar.reboot_watchers.lock().await.len(), 1);
        };
        exec.run_singlethreaded(fut);

        // Set up the notify future
        let notify_future = registrar.notify_reboot_watchers(
            RebootReasons::new(fpower::RebootReason2::HighTemperature),
            seconds(1.0),
        );
        futures::pin_mut!(notify_future);

        // Verify that the notify future can't complete on the first attempt (because the watcher
        // will not have responded)
        assert!(exec.run_until_stalled(&mut notify_future).is_pending());

        // Ack the reboot notification, allowing the shutdown flow to continue
        let fpower::RebootWatcherRequest::OnReboot { responder, .. } =
            exec.run_singlethreaded(&mut watcher_stream.try_next()).unwrap().unwrap();
        assert_matches!(responder.send(), Ok(()));

        // Verify the notify future can now complete
        assert!(exec.run_until_stalled(&mut notify_future).is_ready());
    }

    /// Tests that a reboot watcher is able to delay the shutdown but will time out after the
    /// expected duration. The test also verifies that when a watcher times out, it is removed
    /// from the list of registered reboot watchers.
    #[test]
    fn test_watcher_response_timeout() {
        let mut exec = fasync::TestExecutor::new_with_fake_time();
        let registrar = ShutdownWatcher::new();
        exec.set_fake_time(fasync::MonotonicInstant::from_nanos(0));

        // Register the reboot watcher
        let (watcher_proxy, _watcher_stream) =
            fidl::endpoints::create_proxy_and_stream::<fpower::RebootWatcherMarker>();
        let fut = async {
            registrar.add_reboot_watcher(watcher_proxy).await;
            assert_eq!(registrar.reboot_watchers.lock().await.len(), 1);
        };
        futures::pin_mut!(fut);
        assert!(exec.run_until_stalled(&mut fut).is_ready());

        // Set up the notify future
        let notify_future = registrar.notify_reboot_watchers(
            RebootReasons::new(fpower::RebootReason2::HighTemperature),
            seconds(1.0),
        );
        futures::pin_mut!(notify_future);

        // Verify that the notify future can't complete on the first attempt (because the watcher
        // will not have responded)
        assert!(exec.run_until_stalled(&mut notify_future).is_pending());

        // Wake the timer that causes the watcher timeout to fire
        assert_eq!(exec.wake_next_timer(), Some(fasync::MonotonicInstant::from_nanos(1e9 as i64)));

        // Verify the notify future can now complete
        assert!(exec.run_until_stalled(&mut notify_future).is_ready());

        // Since the watcher timed out, verify it is removed from `reboot_watchers`
        let fut = async {
            assert_eq!(registrar.reboot_watchers.lock().await.len(), 0);
        };
        futures::pin_mut!(fut);
        assert!(exec.run_until_stalled(&mut fut).is_ready());
    }

    /// Tests that an unsuccessful RebootWatcher registration results in the
    /// RebootMethodsWatcherRegister channel being closed.
    #[fasync::run_singlethreaded(test)]
    async fn test_watcher_register_fail() {
        let registrar = ShutdownWatcher::new();

        // Create the registration proxy/stream
        let (register_proxy, register_stream) = fidl::endpoints::create_proxy_and_stream::<
            fpower::RebootMethodsWatcherRegisterMarker,
        >();

        // Start the RebootMethodsWatcherRegister server that will handle Register requests from
        // `register_proxy`
        fasync::Task::local(async move {
            registrar.handle_reboot_register_request(register_stream).await;
        })
        .detach();

        // Send an invalid request to the server to force a failure
        assert_matches!(register_proxy.as_channel().write(&[], &mut []), Ok(()));

        // Verify the RebootMethodsWatcherRegister channel is closed
        assert_matches!(register_proxy.on_closed().await, Ok(zx::Signals::CHANNEL_PEER_CLOSED));
    }
}