persistence/
lib.rs

1// Copyright 2020 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! `diagnostics-persistence` component persists Inspect VMOs and serves them at the next boot.
6
7mod fetcher;
8mod file_handler;
9mod inspect_server;
10mod scheduler;
11
12use anyhow::{Context, Error};
13use argh::FromArgs;
14use fidl::endpoints;
15use fuchsia_inspect::component;
16use fuchsia_inspect::health::Reporter;
17use fuchsia_runtime::{HandleInfo, HandleType};
18use futures::{StreamExt, TryStreamExt};
19use log::*;
20use persistence_build_config::Config as BuildConfig;
21use scheduler::Scheduler;
22use zx::BootInstant;
23use {
24    fidl_fuchsia_process_lifecycle as flifecycle, fidl_fuchsia_update as fupdate,
25    fuchsia_async as fasync,
26};
27
/// The name of the subcommand and the logs-tag.
pub const PROGRAM_NAME: &str = "persistence";
/// Name of the Inspect node under which persisted data is served (see `main`,
/// which records this child on the component's root node after publication).
pub const PERSIST_NODE_NAME: &str = "persist";
/// Added after persisted data is fully published
pub const PUBLISHED_TIME_KEY: &str = "published";

/// Command line args
///
/// The `persistence` subcommand takes no arguments; this struct exists so the
/// subcommand can be selected and parsed by `argh`.
#[derive(FromArgs, Debug, PartialEq)]
#[argh(subcommand, name = "persistence")]
pub struct CommandLine {}
38
39pub async fn main(_args: CommandLine) -> Result<(), Error> {
40    info!("Starting Diagnostics Persistence Service service");
41    let lifecycle =
42        fuchsia_runtime::take_startup_handle(HandleInfo::new(HandleType::Lifecycle, 0)).unwrap();
43    let lifecycle: zx::Channel = lifecycle.into();
44    let lifecycle: endpoints::ServerEnd<flifecycle::LifecycleMarker> = lifecycle.into();
45    let (mut lifecycle_request_stream, _) = lifecycle.into_stream_and_control_handle();
46    let lifecycle_task = async move {
47        match lifecycle_request_stream.next().await {
48            Some(Ok(flifecycle::LifecycleRequest::Stop { .. })) => {
49                debug!("Received stop request");
50            }
51            Some(Err(e)) => {
52                error!("Received FIDL error from Lifecycle: {e:?}");
53                std::future::pending::<()>().await
54            }
55            None => {
56                debug!("Lifecycle request stream closed");
57                std::future::pending::<()>().await
58            }
59        }
60    };
61
62    let mut health = component::health();
63    let config = persistence_config::load_configuration_files().context("Error loading configs")?;
64    let build_config = BuildConfig::take_from_startup_handle();
65    let inspector = component::inspector();
66    inspector.root().record_child("config", |config_node| build_config.record_inspect(config_node));
67    let _inspect_server_task =
68        inspect_runtime::publish(inspector, inspect_runtime::PublishOptions::default());
69
70    file_handler::forget_old_data(&config)?;
71
72    // Add a persistence fidl service for each service defined in the config files.
73    let scope = fasync::Scope::new();
74    Scheduler::spawn(scope.to_handle(), &config).await.context("Error creating scheduler")?;
75
76    // Before serving previous data, wait until the post-boot system update check has finished.
77    // Note: We're already accepting persist requests. If we receive a request, store
78    // some data, and then cache is cleared after data is persisted, that data will be lost. This
79    // is correct behavior - we don't want to remember anything from before the cache was cleared.
80    scope.spawn(async move {
81        if build_config.skip_update_check {
82            info!("Skipping the update check, publishing previous boot data");
83        } else if let Err(e) = wait_for_update().await {
84            warn!(e:?; "Will not publish previous boot data");
85            return;
86        }
87
88        inspector.root().record_child(PERSIST_NODE_NAME, |node| {
89            if let Err(e) = inspect_server::serve_persisted_data(node) {
90                error!("Failed to serve persisted data: {e}");
91            }
92            health.set_ok();
93            info!("Diagnostics Persistence Service ready");
94        });
95        inspector.root().record_int(PUBLISHED_TIME_KEY, BootInstant::get().into_nanos());
96    });
97
98    lifecycle_task.await;
99    info!("Stopping due to lifecycle request");
100    scope.cancel().await;
101
102    Ok(())
103}
104
105async fn wait_for_update() -> Result<(), Error> {
106    info!("Waiting for post-boot update check...");
107    let (notifier_client, mut notifier_request_stream) =
108        fidl::endpoints::create_request_stream::<fupdate::NotifierMarker>();
109    match fuchsia_component::client::connect_to_protocol::<fupdate::ListenerMarker>() {
110        Ok(proxy) => {
111            proxy.notify_on_first_update_check(
112                fupdate::ListenerNotifyOnFirstUpdateCheckRequest {
113                    notifier: Some(notifier_client),
114                    ..Default::default()
115                },
116            )?;
117        }
118        Err(e) => {
119            warn!(
120                e:?;
121                "Unable to connect to fuchsia.update.Listener; will publish immediately"
122            );
123
124            return Ok(());
125        }
126    }
127
128    match notifier_request_stream.try_next().await {
129        Ok(Some(fupdate::NotifierRequest::Notify { control_handle: _ })) => {}
130        Ok(None) => {
131            return Err(anyhow::anyhow!("Did not receive update notification; not publishing"));
132        }
133        Err(e) => {
134            return Err(anyhow::anyhow!(
135                "Error waiting for update notification; not publishing: {e}"
136            ));
137        }
138    }
139
140    // Start serving previous boot data
141    info!("...Update check has completed; publishing previous boot data");
142    Ok(())
143}