// gpt_component/service.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5use crate::config::Config;
6use crate::gpt::GptManager;
7use anyhow::{Context as _, Error};
8use block_client::RemoteBlockClient;
9use fidl::endpoints::{DiscoverableProtocolMarker as _, RequestStream as _, ServiceMarker as _};
10use fidl_fuchsia_fs as ffs;
11use fidl_fuchsia_fs_startup as fstartup;
12use fidl_fuchsia_io as fio;
13use fidl_fuchsia_process_lifecycle as flifecycle;
14use fidl_fuchsia_storage_block as fblock;
15use fidl_fuchsia_storage_partitions as fpartitions;
16use fuchsia_async as fasync;
17use futures::lock::Mutex as AsyncMutex;
18use futures::stream::TryStreamExt as _;
19use std::sync::Arc;
20use vfs::directory::helper::DirectlyMutable as _;
21use vfs::execution_scope::ExecutionScope;
22
/// The component's top-level service: owns the lifecycle state of the GPT and the pseudo
/// filesystem through which protocols and partitions are published.
pub struct GptService {
    // Current lifecycle state (stopped / needs-formatting / running).  An async mutex is
    // used because the lock is held across `.await` points (e.g. while loading the GPT).
    state: AsyncMutex<State>,

    // The execution scope of the pseudo filesystem.
    scope: ExecutionScope,

    // The root of the pseudo filesystem for the component.
    export_dir: Arc<vfs::directory::immutable::Simple>,

    // A directory where partitions are published.
    partitions_dir: Arc<vfs::directory::immutable::Simple>,
}
35
/// Lifecycle state of the service with respect to the bound block device.
#[derive(Default)]
enum State {
    /// No device has been bound via `Startup.Start` yet.
    #[default]
    Stopped,
    /// The GPT is malformed and needs to be reformatted with ResetPartitionTables before it can be
    /// used.  The component will publish an empty partitions directory.
    NeedsFormatting(Config, fblock::BlockProxy),
    /// The GPT was loaded successfully and partitions are being served.
    Running(Arc<GptManager>),
}
45
46impl State {
47    fn is_stopped(&self) -> bool {
48        if let Self::Stopped = self { true } else { false }
49    }
50}
51
52impl GptService {
53    pub fn new() -> Arc<Self> {
54        let export_dir = vfs::directory::immutable::simple();
55        let partitions_dir = vfs::directory::immutable::simple();
56        Arc::new(Self {
57            state: Default::default(),
58            scope: ExecutionScope::new(),
59            export_dir,
60            partitions_dir,
61        })
62    }
63
64    pub async fn run(
65        self: Arc<Self>,
66        outgoing_dir: zx::Channel,
67        lifecycle_channel: Option<zx::Channel>,
68    ) -> Result<(), Error> {
69        let svc_dir = vfs::directory::immutable::simple();
70        self.export_dir.add_entry("svc", svc_dir.clone()).expect("Unable to create svc dir");
71
72        svc_dir
73            .add_entry(
74                fpartitions::PartitionServiceMarker::SERVICE_NAME,
75                self.partitions_dir.clone(),
76            )
77            .unwrap();
78        let weak = Arc::downgrade(&self);
79        svc_dir
80            .add_entry(
81                fstartup::StartupMarker::PROTOCOL_NAME,
82                vfs::service::host(move |requests| {
83                    let weak = weak.clone();
84                    async move {
85                        if let Some(me) = weak.upgrade() {
86                            let _ = me.handle_start_requests(requests).await;
87                        }
88                    }
89                }),
90            )
91            .unwrap();
92        let weak = Arc::downgrade(&self);
93        svc_dir
94            .add_entry(
95                fpartitions::PartitionsAdminMarker::PROTOCOL_NAME,
96                vfs::service::host(move |requests| {
97                    let weak = weak.clone();
98                    async move {
99                        if let Some(me) = weak.upgrade() {
100                            let _ = me.handle_partitions_admin_requests(requests).await;
101                        }
102                    }
103                }),
104            )
105            .unwrap();
106        let weak = Arc::downgrade(&self);
107        svc_dir
108            .add_entry(
109                fpartitions::PartitionsManagerMarker::PROTOCOL_NAME,
110                vfs::service::host(move |requests| {
111                    let weak = weak.clone();
112                    async move {
113                        if let Some(me) = weak.upgrade() {
114                            let _ = me.handle_partitions_manager_requests(requests).await;
115                        }
116                    }
117                }),
118            )
119            .unwrap();
120        let weak = Arc::downgrade(&self);
121        svc_dir
122            .add_entry(
123                ffs::AdminMarker::PROTOCOL_NAME,
124                vfs::service::host(move |requests| {
125                    let weak = weak.clone();
126                    async move {
127                        if let Some(me) = weak.upgrade() {
128                            let _ = me.handle_admin_requests(requests).await;
129                        }
130                    }
131                }),
132            )
133            .unwrap();
134
135        vfs::directory::serve_on(
136            self.export_dir.clone(),
137            fio::PERM_READABLE | fio::PERM_WRITABLE | fio::PERM_EXECUTABLE,
138            self.scope.clone(),
139            fidl::endpoints::ServerEnd::new(outgoing_dir),
140        );
141
142        if let Some(channel) = lifecycle_channel {
143            let me = self.clone();
144            self.scope.spawn(async move {
145                if let Err(e) = me.handle_lifecycle_requests(channel).await {
146                    log::warn!(error:? = e; "handle_lifecycle_requests");
147                }
148            });
149        }
150
151        self.scope.wait().await;
152
153        Ok(())
154    }
155
156    async fn handle_start_requests(
157        self: Arc<Self>,
158        mut stream: fstartup::StartupRequestStream,
159    ) -> Result<(), Error> {
160        while let Some(request) = stream.try_next().await.context("Reading request")? {
161            log::debug!(request:?; "");
162            match request {
163                fstartup::StartupRequest::Start { device, options, responder } => {
164                    responder
165                        .send(
166                            self.start(device.into_proxy(), options.into())
167                                .await
168                                .map_err(|status| status.into_raw()),
169                        )
170                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Start response"));
171                }
172                fstartup::StartupRequest::Format { responder, .. } => {
173                    responder
174                        .send(Err(zx::Status::NOT_SUPPORTED.into_raw()))
175                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Check response"));
176                }
177                fstartup::StartupRequest::Check { responder, .. } => {
178                    responder
179                        .send(Err(zx::Status::NOT_SUPPORTED.into_raw()))
180                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Check response"));
181                }
182            }
183        }
184        Ok(())
185    }
186
    /// Binds the service to `device` and attempts to load its GPT.  On success the state
    /// becomes `Running`; if the GPT cannot be loaded the state becomes `NeedsFormatting`
    /// and `Ok` is still returned (see the TODO below for why the error is hidden).
    /// Returns `ALREADY_BOUND` if a device was already bound.
    async fn start(
        self: &Arc<Self>,
        device: fblock::BlockProxy,
        config: Config,
    ) -> Result<(), zx::Status> {
        log::info!(config:?; "GPT starting");
        // Hold the state lock across the whole transition so concurrent Start calls
        // serialize and exactly one can bind.
        let mut state = self.state.lock().await;
        if !state.is_stopped() {
            log::warn!("Device already bound");
            return Err(zx::Status::ALREADY_BOUND);
        }

        // TODO(https://fxbug.dev/339491886): It would be better if `start` failed on a malformed
        // device, rather than hiding this state from fshost.  However, fs_management isn't well set
        // up to deal with this, because it ties the outgoing directory to a successful return from
        // `start` (see `ServingMultiVolumeFilesystem`), and fshost resolves the queued requests for
        // the filesystem only after `start` is successful.  We should refactor fs_management and
        // fshost to better support this case, which might require changing how queueing works so
        // there's more flexibility to either resolve queueing when the component starts up (what we
        // need here), or when `Start` is successful (what a filesystem like Fxfs needs).
        *state = match GptManager::new_with_config(
            device.clone(),
            self.partitions_dir.clone(),
            config.clone(),
        )
        .await
        {
            Ok(runner) => State::Running(runner),
            Err(err) => {
                // This is a warning because, as described above, we can't return an error from
                // here, so there are normal flows that can print this error message that don't
                // indicate a bug or incorrect behavior.
                log::warn!(err:?; "Failed to load GPT.  Reformatting may be required.");
                State::NeedsFormatting(config, device)
            }
        };
        Ok(())
    }
225
226    async fn handle_partitions_manager_requests(
227        self: Arc<Self>,
228        mut stream: fpartitions::PartitionsManagerRequestStream,
229    ) -> Result<(), Error> {
230        while let Some(request) = stream.try_next().await.context("Reading request")? {
231            log::debug!(request:?; "");
232            match request {
233                fpartitions::PartitionsManagerRequest::GetBlockInfo { responder } => {
234                    responder
235                        .send(self.get_block_info().await.map_err(|status| status.into_raw()))
236                        .unwrap_or_else(
237                            |e| log::error!(e:?; "Failed to send GetBlockInfo response"),
238                        );
239                }
240                fpartitions::PartitionsManagerRequest::CreateTransaction { responder } => {
241                    responder
242                        .send(self.create_transaction().await.map_err(|status| status.into_raw()))
243                        .unwrap_or_else(
244                            |e| log::error!(e:?; "Failed to send CreateTransaction response"),
245                        );
246                }
247                fpartitions::PartitionsManagerRequest::CommitTransaction {
248                    transaction,
249                    responder,
250                } => {
251                    responder
252                        .send(
253                            self.commit_transaction(transaction)
254                                .await
255                                .map_err(|status| status.into_raw()),
256                        )
257                        .unwrap_or_else(
258                            |e| log::error!(e:?; "Failed to send CommitTransaction response"),
259                        );
260                }
261                fpartitions::PartitionsManagerRequest::AddPartition { payload, responder } => {
262                    responder
263                        .send(self.add_partition(payload).await.map_err(|status| status.into_raw()))
264                        .unwrap_or_else(
265                            |e| log::error!(e:?; "Failed to send AddPartition response"),
266                        );
267                }
268            }
269        }
270        Ok(())
271    }
272
273    async fn get_block_info(&self) -> Result<(u64, u32), zx::Status> {
274        let state = self.state.lock().await;
275        match &*state {
276            State::Stopped => return Err(zx::Status::BAD_STATE),
277            State::NeedsFormatting(_, block) => {
278                let info = block
279                    .get_info()
280                    .await
281                    .map_err(|err| {
282                        log::error!(err:?; "get_block_info: failed to query block info");
283                        zx::Status::IO
284                    })?
285                    .map_err(zx::Status::from_raw)?;
286                Ok((info.block_count, info.block_size))
287            }
288            State::Running(gpt) => Ok((gpt.block_count(), gpt.block_size())),
289        }
290    }
291
292    async fn create_transaction(&self) -> Result<zx::EventPair, zx::Status> {
293        let gpt_manager = self.gpt_manager().await?;
294        gpt_manager.create_transaction().await
295    }
296
297    async fn commit_transaction(&self, transaction: zx::EventPair) -> Result<(), zx::Status> {
298        let gpt_manager = self.gpt_manager().await?;
299        gpt_manager.commit_transaction(transaction).await
300    }
301
302    async fn add_partition(
303        &self,
304        request: fpartitions::PartitionsManagerAddPartitionRequest,
305    ) -> Result<(), zx::Status> {
306        let gpt_manager = self.gpt_manager().await?;
307        gpt_manager.add_partition(request).await
308    }
309
310    async fn handle_partitions_admin_requests(
311        self: Arc<Self>,
312        mut stream: fpartitions::PartitionsAdminRequestStream,
313    ) -> Result<(), Error> {
314        while let Some(request) = stream.try_next().await.context("Reading request")? {
315            log::debug!(request:?; "");
316            match request {
317                fpartitions::PartitionsAdminRequest::ResetPartitionTable {
318                    partitions,
319                    responder,
320                } => {
321                    responder
322                        .send(
323                            self.reset_partition_table(partitions)
324                                .await
325                                .map_err(|status| status.into_raw()),
326                        )
327                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send Start response"));
328                }
329            }
330        }
331        Ok(())
332    }
333
334    async fn reset_partition_table(
335        &self,
336        partitions: Vec<fpartitions::PartitionInfo>,
337    ) -> Result<(), zx::Status> {
338        fn convert_partition_info(info: fpartitions::PartitionInfo) -> gpt::PartitionInfo {
339            gpt::PartitionInfo {
340                label: info.name,
341                type_guid: gpt::Guid::from_bytes(info.type_guid.value),
342                instance_guid: gpt::Guid::from_bytes(info.instance_guid.value),
343                start_block: info.start_block,
344                num_blocks: info.num_blocks,
345                flags: info.flags,
346            }
347        }
348        let partitions = partitions.into_iter().map(convert_partition_info).collect::<Vec<_>>();
349
350        let mut state = self.state.lock().await;
351        match &mut *state {
352            State::Stopped => return Err(zx::Status::BAD_STATE),
353            State::NeedsFormatting(config, block) => {
354                log::info!("reset_partition_table: Reformatting GPT.");
355                let client = Arc::new(RemoteBlockClient::new(block.clone()).await?);
356
357                log::info!("reset_partition_table: Reformatting GPT...");
358                gpt::Gpt::format(client, partitions).await.map_err(|err| {
359                    log::error!(err:?; "reset_partition_table: failed to init GPT");
360                    zx::Status::IO
361                })?;
362                *state = State::Running(
363                    GptManager::new_with_config(
364                        block.clone(),
365                        self.partitions_dir.clone(),
366                        config.clone(),
367                    )
368                    .await
369                    .map_err(|err| {
370                        log::error!(err:?; "reset_partition_table: failed to re-launch GPT");
371                        zx::Status::BAD_STATE
372                    })?,
373                );
374            }
375            State::Running(gpt) => {
376                log::info!("reset_partition_table: Updating GPT.");
377                gpt.reset_partition_table(partitions).await?;
378            }
379        }
380        Ok(())
381    }
382
383    async fn handle_admin_requests(
384        &self,
385        mut stream: ffs::AdminRequestStream,
386    ) -> Result<(), Error> {
387        if let Some(request) = stream.try_next().await.context("Reading request")? {
388            match request {
389                ffs::AdminRequest::Shutdown { responder } => {
390                    log::info!("Received Admin::Shutdown request");
391                    self.shutdown().await;
392                    responder
393                        .send()
394                        .unwrap_or_else(|e| log::error!(e:?; "Failed to send shutdown response"));
395                    log::info!("Admin shutdown complete");
396                }
397            }
398        }
399        Ok(())
400    }
401
402    async fn handle_lifecycle_requests(&self, lifecycle_channel: zx::Channel) -> Result<(), Error> {
403        let mut stream = flifecycle::LifecycleRequestStream::from_channel(
404            fasync::Channel::from_channel(lifecycle_channel),
405        );
406        match stream.try_next().await.context("Reading request")? {
407            Some(flifecycle::LifecycleRequest::Stop { .. }) => {
408                log::info!("Received Lifecycle::Stop request");
409                self.shutdown().await;
410                log::info!("Lifecycle shutdown complete");
411            }
412            None => {}
413        }
414        Ok(())
415    }
416
417    async fn gpt_manager(&self) -> Result<Arc<GptManager>, zx::Status> {
418        match &*self.state.lock().await {
419            State::Stopped | State::NeedsFormatting(_, _) => Err(zx::Status::BAD_STATE),
420            State::Running(gpt) => Ok(gpt.clone()),
421        }
422    }
423
424    async fn shutdown(&self) {
425        let mut state = self.state.lock().await;
426        if let State::Running(gpt) = std::mem::take(&mut *state) {
427            gpt.shutdown().await;
428        }
429        self.scope.shutdown();
430    }
431}
432
433#[cfg(test)]
434mod tests {
435    use super::GptService;
436    use block_client::RemoteBlockClient;
437    use fidl::endpoints::Proxy as _;
438    use fidl_fuchsia_fs as ffs;
439    use fidl_fuchsia_fs_startup as fstartup;
440    use fidl_fuchsia_io as fio;
441    use fidl_fuchsia_process_lifecycle::LifecycleMarker;
442    use fidl_fuchsia_storage_block as fblock;
443    use fidl_fuchsia_storage_partitions as fpartitions;
444    use fuchsia_async as fasync;
445    use fuchsia_component::client::connect_to_protocol_at_dir_svc;
446    use futures::FutureExt as _;
447    use gpt::{Gpt, Guid, PartitionInfo};
448    use std::sync::Arc;
449    use test_vmo_backed_block_server::VmoBackedServer;
450
451    async fn setup_server(
452        block_size: u32,
453        block_count: u64,
454        partitions: Vec<PartitionInfo>,
455    ) -> Arc<VmoBackedServer> {
456        let vmo = zx::Vmo::create(block_size as u64 * block_count).unwrap();
457        let server = Arc::new(
458            VmoBackedServer::from_vmo(512, vmo).expect("Failed to create VmoBackedServer"),
459        );
460        {
461            let (block_client, block_server) =
462                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
463            let volume_stream = fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(
464                block_server.into_channel(),
465            )
466            .into_stream();
467            let server_clone = server.clone();
468            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
469            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
470            Gpt::format(client, partitions).await.unwrap();
471        }
472        server
473    }
474
    /// Verifies that a `Lifecycle::Stop` request shuts the service down: `run` returns and
    /// the lifecycle channel is closed.
    #[fuchsia::test]
    async fn lifecycle() {
        let (outgoing_dir, outgoing_dir_server) =
            fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
        let (lifecycle_client, lifecycle_server) =
            fidl::endpoints::create_proxy::<LifecycleMarker>();
        let (block_client, block_server) =
            fidl::endpoints::create_endpoints::<fblock::BlockMarker>();
        let volume_stream =
            fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(block_server.into_channel())
                .into_stream();

        // Client, service, and block device run concurrently on the same executor; the
        // test completes when the client observes CHANNEL_PEER_CLOSED and `run` returns.
        futures::join!(
            async {
                // Client
                let client =
                    connect_to_protocol_at_dir_svc::<fstartup::StartupMarker>(&outgoing_dir)
                        .unwrap();
                client
                    .start(block_client, &fstartup::StartOptions::default())
                    .await
                    .expect("FIDL error")
                    .expect("Start failed");
                lifecycle_client.stop().expect("Stop failed");
                // The service drops its end of the lifecycle channel on shutdown.
                fasync::OnSignals::new(
                    &lifecycle_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            },
            async {
                // Server
                let service = GptService::new();
                service
                    .run(outgoing_dir_server.into_channel(), Some(lifecycle_server.into_channel()))
                    .await
                    .expect("Run failed");
            },
            async {
                // Block device
                let server = setup_server(
                    512,
                    8,
                    vec![PartitionInfo {
                        label: "part".to_string(),
                        type_guid: Guid::from_bytes([0xabu8; 16]),
                        instance_guid: Guid::from_bytes([0xcdu8; 16]),
                        start_block: 4,
                        num_blocks: 1,
                        flags: 0,
                    }],
                )
                .await;
                // Serving ends when the service closes its block connection at shutdown.
                let _ = server.serve(volume_stream).await;
            }
            .fuse(),
        );
    }
534
    /// Verifies that `fuchsia.fs/Admin.Shutdown` shuts the service down even without a
    /// lifecycle channel: the response is sent and the admin channel is then closed.
    #[fuchsia::test]
    async fn admin_shutdown() {
        let (outgoing_dir, outgoing_dir_server) =
            fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
        let (block_client, block_server) =
            fidl::endpoints::create_endpoints::<fblock::BlockMarker>();
        let volume_stream =
            fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(block_server.into_channel())
                .into_stream();

        // Client, service, and block device run concurrently on the same executor.
        futures::join!(
            async {
                // Client
                let client =
                    connect_to_protocol_at_dir_svc::<fstartup::StartupMarker>(&outgoing_dir)
                        .unwrap();
                let admin_client =
                    connect_to_protocol_at_dir_svc::<ffs::AdminMarker>(&outgoing_dir).unwrap();
                client
                    .start(block_client, &fstartup::StartOptions::default())
                    .await
                    .expect("FIDL error")
                    .expect("Start failed");
                admin_client.shutdown().await.expect("admin shutdown failed");
                // Shutdown tears down the execution scope, closing the admin connection.
                fasync::OnSignals::new(
                    &admin_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            },
            async {
                // Server (no lifecycle channel in this test)
                let service = GptService::new();
                service.run(outgoing_dir_server.into_channel(), None).await.expect("Run failed");
            },
            async {
                // Block device
                let server = setup_server(
                    512,
                    8,
                    vec![PartitionInfo {
                        label: "part".to_string(),
                        type_guid: Guid::from_bytes([0xabu8; 16]),
                        instance_guid: Guid::from_bytes([0xcdu8; 16]),
                        start_block: 4,
                        num_blocks: 1,
                        flags: 0,
                    }],
                )
                .await;
                let _ = server.serve(volume_stream).await;
            }
            .fuse(),
        );
    }
591
    /// Exercises the PartitionsManager transaction lifecycle: only one transaction may be
    /// live at a time; committing or dropping it allows a new one to be created.
    #[fuchsia::test]
    async fn transaction_lifecycle() {
        let (outgoing_dir, outgoing_dir_server) =
            fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
        let (lifecycle_client, lifecycle_server) =
            fidl::endpoints::create_proxy::<LifecycleMarker>();
        let (block_client, block_server) =
            fidl::endpoints::create_endpoints::<fblock::BlockMarker>();
        let volume_stream =
            fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(block_server.into_channel())
                .into_stream();

        // Client, service, and block device run concurrently on the same executor.
        futures::join!(
            async {
                // Client
                connect_to_protocol_at_dir_svc::<fstartup::StartupMarker>(&outgoing_dir)
                    .unwrap()
                    .start(block_client, &fstartup::StartOptions::default())
                    .await
                    .expect("FIDL error")
                    .expect("Start failed");

                let pm_client = connect_to_protocol_at_dir_svc::<
                    fpartitions::PartitionsManagerMarker,
                >(&outgoing_dir)
                .unwrap();
                let transaction = pm_client
                    .create_transaction()
                    .await
                    .expect("FIDL error")
                    .expect("create_transaction failed");

                // A second transaction must be rejected while the first is outstanding.
                pm_client
                    .create_transaction()
                    .await
                    .expect("FIDL error")
                    .expect_err("create_transaction should fail while other txn exists");

                pm_client
                    .commit_transaction(transaction)
                    .await
                    .expect("FIDL error")
                    .expect("commit_transaction failed");

                // After committing, a new transaction can be created...
                {
                    let _transaction = pm_client
                        .create_transaction()
                        .await
                        .expect("FIDL error")
                        .expect("create_transaction should succeed after committing txn");
                }

                // ...and dropping a transaction (scope above) also releases the slot.
                pm_client
                    .create_transaction()
                    .await
                    .expect("FIDL error")
                    .expect("create_transaction should succeed after dropping txn");

                lifecycle_client.stop().expect("Stop failed");
                fasync::OnSignals::new(
                    &lifecycle_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            },
            async {
                // Server
                let service = GptService::new();
                service
                    .run(outgoing_dir_server.into_channel(), Some(lifecycle_server.into_channel()))
                    .await
                    .expect("Run failed");
            },
            async {
                // Block device
                let server = setup_server(
                    512,
                    16,
                    vec![PartitionInfo {
                        label: "part".to_string(),
                        type_guid: Guid::from_bytes([0xabu8; 16]),
                        instance_guid: Guid::from_bytes([0xcdu8; 16]),
                        start_block: 4,
                        num_blocks: 1,
                        flags: 0,
                    }],
                )
                .await;
                let _ = server.serve(volume_stream).await;
            }
            .fuse(),
        );
    }
686}