// fs_management/filesystem.rs
1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Contains the asynchronous version of [`Filesystem`][`crate::Filesystem`].
6
7use crate::error::{QueryError, ShutdownError};
8use crate::{ComponentType, FSConfig, Options};
9use anyhow::{Context, Error, anyhow, bail, ensure};
10use fidl::endpoints::{ClientEnd, ServerEnd, create_endpoints, create_proxy};
11use fidl_fuchsia_component::{self as fcomponent, RealmMarker};
12use fidl_fuchsia_component_decl as fdecl;
13use fidl_fuchsia_fs::AdminMarker;
14use fidl_fuchsia_fs_startup::{
15    CheckOptions, CreateOptions, MountOptions, StartupMarker, VolumesMarker,
16};
17use fidl_fuchsia_io as fio;
18use fidl_fuchsia_storage_block::{self as fblock, BlockMarker};
19use fuchsia_component_client::{
20    connect_to_named_protocol_at_dir_root, connect_to_protocol, connect_to_protocol_at_dir_root,
21    connect_to_protocol_at_dir_svc, open_childs_exposed_directory,
22};
23use std::sync::Arc;
24use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
25use zx::Status;
26
27/// Creates new connections to an instance of fuchsia.hardware.block.Block and similar protocols
28/// (Volume, Partition).
29///
30/// NOTE: It is important to understand the difference between `BlockConnector` and the actual
31/// protocols (e.g. a `ClientEnd<BlockMarker>` or `BlockProxy`): `BlockConnector` is used to *create
32/// new connections* to a Block.
33///
34/// It is not possible to directly convert a `ClientEnd<BlockMarker>` (or `BlockProxy`) into a
35/// `BlockConnector`, because Block is not cloneable.  To implement `BlockConnector`, you will need
36/// a way to generate new connections to a Block instance.  A few common implementations are
37/// provided below.
38pub trait BlockConnector: Send + Sync {
39    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error>;
40    fn connect_block(&self) -> Result<ClientEnd<BlockMarker>, Error> {
41        let (client, server) = fidl::endpoints::create_endpoints();
42        self.connect_channel_to_block(server)?;
43        Ok(client)
44    }
45}
46
/// Implements `BlockConnector` via a service dir.  Wraps `connect_to_named_protocol_at_dir_root`.
#[derive(Clone, Debug)]
pub struct DirBasedBlockConnector(/* service dir */ fio::DirectoryProxy, /* protocol path */ String);
50
51impl DirBasedBlockConnector {
52    /// Creates a new [`DirBasedBlockConnector`].  It is expected that `path` within `dir` hosts the
53    /// Volume protocol.
54    pub fn new(dir: fio::DirectoryProxy, path: String) -> Self {
55        Self(dir, path)
56    }
57
58    pub fn dir(&self) -> &fio::DirectoryProxy {
59        &self.0
60    }
61
62    /// Returns the path relative to the directory which hosts the volume protocol.
63    pub fn path(&self) -> &str {
64        &self.1
65    }
66}
67
68impl BlockConnector for DirBasedBlockConnector {
69    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
70        self.0.open(
71            self.path(),
72            fio::Flags::PROTOCOL_SERVICE,
73            &fio::Options::default(),
74            server_end.into_channel(),
75        )?;
76        Ok(())
77    }
78}
79
80impl BlockConnector for fidl_fuchsia_device::ControllerProxy {
81    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
82        let () = self.connect_to_device_fidl(server_end.into_channel())?;
83        Ok(())
84    }
85}
86
87impl BlockConnector for fidl_fuchsia_storage_partitions::PartitionServiceProxy {
88    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
89        self.connect_channel_to_volume(server_end)?;
90        Ok(())
91    }
92}
93
94// NB: We have to be specific here; we cannot do a blanket impl for AsRef<T: BlockConnector> because
95// that would conflict with a downstream crate that implements AsRef for a concrete BlockConnector
96// defined here already.
97impl<T: BlockConnector> BlockConnector for Arc<T> {
98    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
99        self.as_ref().connect_channel_to_block(server_end)
100    }
101}
102
103impl<F> BlockConnector for F
104where
105    F: Fn(ServerEnd<BlockMarker>) -> Result<(), Error> + Send + Sync,
106{
107    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
108        self(server_end)
109    }
110}
111
/// Asynchronously manages a block device for filesystem operations.
pub struct Filesystem {
    /// The filesystem struct keeps the FSConfig in a Box<dyn> instead of holding it directly for
    /// code size reasons. Using a type parameter instead would make monomorphized versions of the
    /// Filesystem impl block for each filesystem type, which duplicates several multi-kilobyte
    /// functions (get_component_exposed_dir and serve in particular) that are otherwise quite
    /// generic over config. Clients that want to be generic over filesystem type also pay the
    /// monomorphization cost, with some, like fshost, paying a lot.
    config: Box<dyn FSConfig>,
    /// Source of new connections to the underlying block device.
    block_connector: Box<dyn BlockConnector>,
    /// The dynamic child instance, set once `get_component_exposed_dir` has launched one; always
    /// `None` for static children.
    component: Option<Arc<DynamicComponentInstance>>,
}
124
// Used to disambiguate children in our component collection.  Combined with the process koid when
// forming child names, so concurrent launches (even across processes) never collide.
static COLLECTION_COUNTER: AtomicU64 = AtomicU64::new(0);
127
impl Filesystem {
    /// Returns a reference to the configuration this `Filesystem` was created with.
    pub fn config(&self) -> &dyn FSConfig {
        self.config.as_ref()
    }

    /// Consumes the `Filesystem`, returning its boxed configuration.
    pub fn into_config(self) -> Box<dyn FSConfig> {
        self.config
    }

    /// Creates a new `Filesystem`.
    pub fn new<B: BlockConnector + 'static, FSC: FSConfig>(
        block_connector: B,
        config: FSC,
    ) -> Self {
        Self::from_boxed_config(Box::new(block_connector), Box::new(config))
    }

    /// Creates a new `Filesystem`.
    pub fn from_boxed_config(
        block_connector: Box<dyn BlockConnector>,
        config: Box<dyn FSConfig>,
    ) -> Self {
        Self { config, block_connector, component: None }
    }

    /// Returns the (relative) moniker of the filesystem component. This will start the component
    /// instance if it is not running.
    pub async fn get_component_moniker(&mut self) -> Result<String, Error> {
        // Launch the component if necessary; for dynamic children this also populates
        // `self.component`.
        let _ = self.get_component_exposed_dir().await?;
        Ok(match self.config.options().component_type {
            ComponentType::StaticChild => self.config.options().component_name.to_string(),
            ComponentType::DynamicChild { .. } => {
                // Set by get_component_exposed_dir above.
                let component = self.component.as_ref().unwrap();
                format!("{}:{}", component.collection, component.name)
            }
        })
    }

    /// Opens the exposed directory of the filesystem component, launching a dynamic child first
    /// if one is required and not already running.
    async fn get_component_exposed_dir(&mut self) -> Result<fio::DirectoryProxy, Error> {
        let options = self.config.options();
        let component_name = options.component_name;
        match options.component_type {
            ComponentType::StaticChild => open_childs_exposed_directory(component_name, None).await,
            ComponentType::DynamicChild { collection_name } => {
                // Reuse the child we launched previously, if any.
                if let Some(component) = &self.component {
                    return open_childs_exposed_directory(
                        component.name.clone(),
                        Some(component.collection.clone()),
                    )
                    .await;
                }

                // We need a unique name, so we pull in the process Koid here since it's possible
                // for the same binary in a component to be launched multiple times and we don't
                // want to collide with children created by other processes.
                let name = format!(
                    "{}-{}-{}",
                    component_name,
                    fuchsia_runtime::process_self().koid().unwrap().raw_koid(),
                    COLLECTION_COUNTER.fetch_add(1, Ordering::Relaxed)
                );

                // Two candidate declarations, tried in order: a relative URL first, then a
                // fuchsia-boot URL.
                let collection_ref = fdecl::CollectionRef { name: collection_name };
                let child_decls = vec![
                    fdecl::Child {
                        name: Some(format!("{}-relative", name)),
                        url: Some(format!("#meta/{}.cm", component_name)),
                        startup: Some(fdecl::StartupMode::Lazy),
                        ..Default::default()
                    },
                    fdecl::Child {
                        name: Some(name),
                        url: Some(format!(
                            "fuchsia-boot:///{}#meta/{}.cm",
                            component_name, component_name
                        )),
                        startup: Some(fdecl::StartupMode::Lazy),
                        ..Default::default()
                    },
                ];
                let realm_proxy = connect_to_protocol::<RealmMarker>()?;
                for child_decl in child_decls {
                    // Launch a new component in our collection.
                    realm_proxy
                        .create_child(
                            &collection_ref,
                            &child_decl,
                            fcomponent::CreateChildArgs::default(),
                        )
                        .await?
                        .map_err(|e| anyhow!("create_child failed: {:?}", e))?;

                    let component = Arc::new(DynamicComponentInstance {
                        name: child_decl.name.unwrap(),
                        collection: collection_ref.name.clone(),
                        should_not_drop: AtomicBool::new(false),
                    });

                    // On failure, `component` is dropped here, which destroys the child (see
                    // DynamicComponentInstance::drop) before we try the next candidate.
                    if let Ok(proxy) = open_childs_exposed_directory(
                        component.name.clone(),
                        Some(component.collection.clone()),
                    )
                    .await
                    {
                        self.component = Some(component);
                        return Ok(proxy);
                    }
                }
                Err(anyhow!("Failed to open exposed directory"))
            }
        }
    }

    /// Calls fuchsia.fs.startup/Startup.Format on the configured filesystem component.
    ///
    /// Which component is used and the options passed to it are controlled by the config this
    /// `Filesystem` was created with.
    ///
    /// See [`FSConfig`].
    ///
    /// # Errors
    ///
    /// Returns any errors from the Format method. Also returns an error if the startup protocol is
    /// not found, if it couldn't launch or find the filesystem component, or if it couldn't get
    /// the block device channel.
    pub async fn format(&mut self) -> Result<(), Error> {
        let channel = self.block_connector.connect_block()?;

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .format(channel, &self.config().options().format_options)
            .await?
            .map_err(Status::from_raw)?;

        Ok(())
    }

    /// Calls fuchsia.fs.startup/Startup.Check on the configured filesystem component.
    ///
    /// Which component is used and the options passed to it are controlled by the config this
    /// `Filesystem` was created with.
    ///
    /// See [`FSConfig`].
    ///
    /// # Errors
    ///
    /// Returns any errors from the Check method. Also returns an error if the startup protocol is
    /// not found, if it couldn't launch or find the filesystem component, or if it couldn't get
    /// the block device channel.
    pub async fn fsck(&mut self) -> Result<(), Error> {
        let channel = self.block_connector.connect_block()?;
        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy.check(channel, CheckOptions::default()).await?.map_err(Status::from_raw)?;
        Ok(())
    }

    /// Serves the filesystem on the block device and returns a [`ServingSingleVolumeFilesystem`]
    /// representing the running filesystem component.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if serving the filesystem failed.
    pub async fn serve(mut self) -> Result<ServingSingleVolumeFilesystem, Error> {
        if self.config.is_multi_volume() {
            bail!("Can't serve a multivolume filesystem; use serve_multi_volume");
        }
        let Options { start_options, reuse_component_after_serving, .. } = self.config.options();

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .start(self.block_connector.connect_block()?, &start_options)
            .await?
            .map_err(Status::from_raw)?;

        // The filesystem exposes its root directory under "root" in its exposed dir.
        let (root_dir, server_end) = create_endpoints::<fio::NodeMarker>();
        exposed_dir.open(
            "root",
            fio::PERM_READABLE | fio::Flags::PERM_INHERIT_WRITE | fio::Flags::PERM_INHERIT_EXECUTE,
            &Default::default(),
            server_end.into_channel(),
        )?;
        // Unless the config asks to reuse the component, ownership of the dynamic child (and
        // hence destroy-on-drop responsibility) moves to the serving filesystem.
        let component = self.component.clone();
        if !reuse_component_after_serving {
            self.component = None;
        }
        Ok(ServingSingleVolumeFilesystem {
            component,
            exposed_dir: Some(exposed_dir),
            root_dir: ClientEnd::<fio::DirectoryMarker>::new(root_dir.into_channel()).into_proxy(),
            binding: None,
        })
    }

    /// Serves the filesystem on the block device and returns a [`ServingMultiVolumeFilesystem`]
    /// representing the running filesystem component.  No volumes are opened; clients have to do
    /// that explicitly.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if serving the filesystem failed.
    pub async fn serve_multi_volume(mut self) -> Result<ServingMultiVolumeFilesystem, Error> {
        if !self.config.is_multi_volume() {
            bail!("Can't serve_multi_volume a single-volume filesystem; use serve");
        }

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .start(self.block_connector.connect_block()?, &self.config.options().start_options)
            .await?
            .map_err(Status::from_raw)?;

        Ok(ServingMultiVolumeFilesystem {
            component: self.component,
            exposed_dir: Some(exposed_dir),
        })
    }
}
349
// Destroys the child when dropped.
struct DynamicComponentInstance {
    name: String,
    collection: String,
    // When set (via `forget`), Drop becomes a no-op so the child outlives this handle; used by
    // `take_exposed_dir`.
    should_not_drop: AtomicBool,
}
356
impl DynamicComponentInstance {
    /// Disarms the Drop impl so the child component is not destroyed when this handle goes away.
    fn forget(&self) {
        self.should_not_drop.store(true, Ordering::Relaxed);
    }
}
362
363impl Drop for DynamicComponentInstance {
364    fn drop(&mut self) {
365        if self.should_not_drop.load(Ordering::Relaxed) {
366            return;
367        }
368        if let Ok(realm_proxy) = connect_to_protocol::<RealmMarker>() {
369            let _ = realm_proxy.destroy_child(&fdecl::ChildRef {
370                name: self.name.clone(),
371                collection: Some(self.collection.clone()),
372            });
373        }
374    }
375}
376
/// Manages the binding of a `fuchsia_io::DirectoryProxy` into the local namespace.  When the object
/// is dropped, the binding is removed.
// NOTE(review): deriving Default yields a binding at the empty path, whose Drop will attempt to
// unbind "" — presumably harmless, but confirm this is intentional.
#[derive(Default)]
pub struct NamespaceBinding(String);
381
382impl NamespaceBinding {
383    pub fn create(root_dir: &fio::DirectoryProxy, path: String) -> Result<NamespaceBinding, Error> {
384        let (client_end, server_end) = create_endpoints();
385        root_dir.clone(ServerEnd::new(server_end.into_channel()))?;
386        let namespace = fdio::Namespace::installed()?;
387        namespace.bind(&path, client_end)?;
388        Ok(Self(path))
389    }
390}
391
392impl std::ops::Deref for NamespaceBinding {
393    type Target = str;
394    fn deref(&self) -> &Self::Target {
395        &self.0
396    }
397}
398
399impl Drop for NamespaceBinding {
400    fn drop(&mut self) {
401        if let Ok(namespace) = fdio::Namespace::installed() {
402            let _ = namespace.unbind(&self.0);
403        }
404    }
405}
406
// TODO(https://fxbug.dev/42174810): Soft migration; remove this after completion
/// Deprecated alias for [`ServingSingleVolumeFilesystem`]; do not use in new code.
pub type ServingFilesystem = ServingSingleVolumeFilesystem;
409
/// Asynchronously manages a serving filesystem. Created from [`Filesystem::serve()`].
pub struct ServingSingleVolumeFilesystem {
    // Dynamic child instance, if any; destroying it on drop is disarmed by `take_exposed_dir`.
    component: Option<Arc<DynamicComponentInstance>>,
    // exposed_dir will always be Some, except when the filesystem is shutting down.
    exposed_dir: Option<fio::DirectoryProxy>,
    // Proxy to the filesystem's root directory (the "root" entry of the exposed dir).
    root_dir: fio::DirectoryProxy,

    // The path in the local namespace that this filesystem is bound to (optional).
    binding: Option<NamespaceBinding>,
}
420
421impl ServingSingleVolumeFilesystem {
422    /// Returns a proxy to the exposed directory of the serving filesystem.
423    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
424        self.exposed_dir.as_ref().unwrap()
425    }
426
427    /// Returns a proxy to the root directory of the serving filesystem.
428    pub fn root(&self) -> &fio::DirectoryProxy {
429        &self.root_dir
430    }
431
432    /// Binds the root directory being served by this filesystem to a path in the local namespace.
433    /// The path must be absolute, containing no "." nor ".." entries.  The binding will be dropped
434    /// when self is dropped.  Only one binding is supported.
435    ///
436    /// # Errors
437    ///
438    /// Returns [`Err`] if binding failed.
439    pub fn bind_to_path(&mut self, path: &str) -> Result<(), Error> {
440        ensure!(self.binding.is_none(), "Already bound");
441        self.binding = Some(NamespaceBinding::create(&self.root_dir, path.to_string())?);
442        Ok(())
443    }
444
445    pub fn bound_path(&self) -> Option<&str> {
446        self.binding.as_deref()
447    }
448
449    /// Returns a [`FilesystemInfo`] object containing information about the serving filesystem.
450    ///
451    /// # Errors
452    ///
453    /// Returns [`Err`] if querying the filesystem failed.
454    pub async fn query(&self) -> Result<Box<fio::FilesystemInfo>, QueryError> {
455        let (status, info) = self.root_dir.query_filesystem().await?;
456        Status::ok(status).map_err(QueryError::DirectoryQuery)?;
457        info.ok_or(QueryError::DirectoryEmptyResult)
458    }
459
460    /// Take the exposed dir from this filesystem instance, dropping the management struct without
461    /// shutting the filesystem down. This leaves the caller with the responsibility of shutting
462    /// down the filesystem, and the filesystem component if necessary.
463    pub fn take_exposed_dir(mut self) -> fio::DirectoryProxy {
464        self.component.take().expect("BUG: component missing").forget();
465        self.exposed_dir.take().expect("BUG: exposed dir missing")
466    }
467
468    /// Attempts to shutdown the filesystem using the
469    /// [`fidl_fuchsia_fs::AdminProxy::shutdown()`] FIDL method and waiting for the filesystem
470    /// process to terminate.
471    ///
472    /// # Errors
473    ///
474    /// Returns [`Err`] if the shutdown failed or the filesystem process did not terminate.
475    pub async fn shutdown(mut self) -> Result<(), ShutdownError> {
476        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(
477            &self.exposed_dir.take().expect("BUG: exposed dir missing"),
478        )?
479        .shutdown()
480        .await?;
481        Ok(())
482    }
483
484    /// Attempts to kill the filesystem process and waits for the process to terminate.
485    ///
486    /// # Errors
487    ///
488    /// Returns [`Err`] if the filesystem process could not be terminated. There is no way to
489    /// recover the [`Filesystem`] from this error.
490    pub async fn kill(self) -> Result<(), Error> {
491        // For components, just shut down the filesystem.
492        // TODO(https://fxbug.dev/293949323): Figure out a way to make this more abrupt - the use-cases are
493        // either testing or when the filesystem isn't responding.
494        self.shutdown().await?;
495        Ok(())
496    }
497}
498
499impl Drop for ServingSingleVolumeFilesystem {
500    fn drop(&mut self) {
501        // Make a best effort attempt to shut down to the filesystem, if we need to.
502        if let Some(exposed_dir) = self.exposed_dir.take() {
503            if let Ok(proxy) =
504                connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
505            {
506                let _ = proxy.shutdown();
507            }
508        }
509    }
510}
511
/// Asynchronously manages a serving multivolume filesystem. Created from
/// [`Filesystem::serve_multi_volume()`].
pub struct ServingMultiVolumeFilesystem {
    // Dynamic child instance, if any; destroying it on drop is disarmed by `take_exposed_dir`.
    component: Option<Arc<DynamicComponentInstance>>,
    // exposed_dir will always be Some, except in Self::shutdown.
    exposed_dir: Option<fio::DirectoryProxy>,
}
519
/// Represents an opened volume in a [`ServingMultiVolumeFilesystem`] instance.
pub struct ServingVolume {
    // Proxy to the volume's root directory (the "root" entry of its exposed dir).
    root_dir: fio::DirectoryProxy,
    // Local-namespace binding for the root, if `bind_to_path` was called.
    binding: Option<NamespaceBinding>,
    // The volume's exposed directory, used to reach e.g. the Admin protocol.
    exposed_dir: fio::DirectoryProxy,
}
526
527impl ServingVolume {
528    fn new(exposed_dir: fio::DirectoryProxy) -> Result<Self, Error> {
529        let (root_dir, server_end) = create_endpoints::<fio::NodeMarker>();
530        exposed_dir.open(
531            "root",
532            fio::PERM_READABLE | fio::Flags::PERM_INHERIT_WRITE | fio::Flags::PERM_INHERIT_EXECUTE,
533            &Default::default(),
534            server_end.into_channel(),
535        )?;
536        Ok(ServingVolume {
537            root_dir: ClientEnd::<fio::DirectoryMarker>::new(root_dir.into_channel()).into_proxy(),
538            binding: None,
539            exposed_dir,
540        })
541    }
542
543    /// Returns a proxy to the root directory of the serving volume.
544    pub fn root(&self) -> &fio::DirectoryProxy {
545        &self.root_dir
546    }
547
548    /// Returns a proxy to the exposed directory of the serving volume.
549    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
550        &self.exposed_dir
551    }
552
553    /// Binds the root directory being served by this filesystem to a path in the local namespace.
554    /// The path must be absolute, containing no "." nor ".." entries.  The binding will be dropped
555    /// when self is dropped, or when unbind_path is called.  Only one binding is supported.
556    ///
557    /// # Errors
558    ///
559    /// Returns [`Err`] if binding failed, or if a binding already exists.
560    pub fn bind_to_path(&mut self, path: &str) -> Result<(), Error> {
561        ensure!(self.binding.is_none(), "Already bound");
562        self.binding = Some(NamespaceBinding::create(&self.root_dir, path.to_string())?);
563        Ok(())
564    }
565
566    /// Remove the namespace binding to the root directory being served by this volume, if there is
567    /// one. If there is no binding, this function does nothing. After this, it is safe to call
568    /// bind_to_path again.
569    pub fn unbind_path(&mut self) {
570        let _ = self.binding.take();
571    }
572
573    pub fn bound_path(&self) -> Option<&str> {
574        self.binding.as_deref()
575    }
576
577    /// Returns a [`FilesystemInfo`] object containing information about the serving volume.
578    ///
579    /// # Errors
580    ///
581    /// Returns [`Err`] if querying the filesystem failed.
582    pub async fn query(&self) -> Result<Box<fio::FilesystemInfo>, QueryError> {
583        let (status, info) = self.root_dir.query_filesystem().await?;
584        Status::ok(status).map_err(QueryError::DirectoryQuery)?;
585        info.ok_or(QueryError::DirectoryEmptyResult)
586    }
587
588    /// Attempts to shutdown the filesystem using the [`fidl_fuchsia_fs::AdminProxy::shutdown()`]
589    /// FIDL method. Fails if the volume is not already open.
590    pub async fn shutdown(self) -> Result<(), Error> {
591        let admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(self.exposed_dir())?;
592        admin_proxy.shutdown().await.context("failed to shutdown volume")?;
593        Ok(())
594    }
595}
596
597impl ServingMultiVolumeFilesystem {
598    /// Returns whether the given volume exists.
599    pub async fn has_volume(&self, volume: &str) -> Result<bool, Error> {
600        let path = format!("volumes/{}", volume);
601        fuchsia_fs::directory::open_node(self.exposed_dir(), &path, fio::Flags::PROTOCOL_NODE)
602            .await
603            .map(|_| true)
604            .or_else(|e| {
605                if let fuchsia_fs::node::OpenError::OpenError(status) = &e {
606                    if *status == zx::Status::NOT_FOUND {
607                        return Ok(false);
608                    }
609                }
610                Err(e.into())
611            })
612    }
613
614    /// Creates and mounts the volume.  Fails if the volume already exists.
615    /// If `options.crypt` is set, the volume will be encrypted using the provided Crypt instance.
616    /// If `options.as_blob` is set, creates a blob volume that is mounted as a blob filesystem.
617    pub async fn create_volume(
618        &self,
619        volume: &str,
620        create_options: CreateOptions,
621        options: MountOptions,
622    ) -> Result<ServingVolume, Error> {
623        let (exposed_dir, server) = create_proxy::<fio::DirectoryMarker>();
624        connect_to_protocol_at_dir_root::<VolumesMarker>(self.exposed_dir())?
625            .create(volume, server, create_options, options)
626            .await?
627            .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
628        ServingVolume::new(exposed_dir)
629    }
630
631    /// Deletes the volume. Fails if the volume is already mounted.
632    pub async fn remove_volume(&self, volume: &str) -> Result<(), Error> {
633        connect_to_protocol_at_dir_root::<VolumesMarker>(self.exposed_dir())?
634            .remove(volume)
635            .await?
636            .map_err(|e| anyhow!(zx::Status::from_raw(e)))
637    }
638
639    /// Mounts an existing volume.  Fails if the volume is already mounted or doesn't exist.
640    /// If `crypt` is set, the volume will be decrypted using the provided Crypt instance.
641    pub async fn open_volume(
642        &self,
643        volume: &str,
644        options: MountOptions,
645    ) -> Result<ServingVolume, Error> {
646        let (exposed_dir, server) = create_proxy::<fio::DirectoryMarker>();
647        let path = format!("volumes/{}", volume);
648        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
649            self.exposed_dir(),
650            &path,
651        )?
652        .mount(server, options)
653        .await?
654        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
655
656        ServingVolume::new(exposed_dir)
657    }
658
659    /// Returns volume info for `volume`.
660    pub async fn get_volume_info(
661        &self,
662        volume: &str,
663    ) -> Result<fidl_fuchsia_fs_startup::VolumeInfo, Error> {
664        let path = format!("volumes/{}", volume);
665        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
666            self.exposed_dir(),
667            &path,
668        )?
669        .get_info()
670        .await?
671        .map_err(|e| anyhow!(zx::Status::from_raw(e)))
672    }
673
674    /// Sets the max byte limit for a volume. Fails if the volume is not mounted.
675    pub async fn set_byte_limit(&self, volume: &str, byte_limit: u64) -> Result<(), Error> {
676        if byte_limit == 0 {
677            return Ok(());
678        }
679        let path = format!("volumes/{}", volume);
680        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
681            self.exposed_dir(),
682            &path,
683        )?
684        .set_limit(byte_limit)
685        .await?
686        .map_err(|e| anyhow!(zx::Status::from_raw(e)))
687    }
688
689    pub async fn check_volume(&self, volume: &str, options: CheckOptions) -> Result<(), Error> {
690        let path = format!("volumes/{}", volume);
691        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
692            self.exposed_dir(),
693            &path,
694        )?
695        .check(options)
696        .await?
697        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
698        Ok(())
699    }
700
701    /// Provides access to the internal |exposed_dir| for use in testing
702    /// callsites which need directory access.
703    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
704        self.exposed_dir.as_ref().expect("BUG: exposed dir missing")
705    }
706
707    /// Attempts to shutdown the filesystem using the [`fidl_fuchsia_fs::AdminProxy::shutdown()`]
708    /// FIDL method.
709    ///
710    /// # Errors
711    ///
712    /// Returns [`Err`] if the shutdown failed.
713    pub async fn shutdown(mut self) -> Result<(), ShutdownError> {
714        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(
715            // Take exposed_dir so we don't attempt to shut down again in Drop.
716            &self.exposed_dir.take().expect("BUG: exposed dir missing"),
717        )?
718        .shutdown()
719        .await?;
720        Ok(())
721    }
722
723    /// Take the exposed dir from this filesystem instance, dropping the management struct without
724    /// shutting the filesystem down. This leaves the caller with the responsibility of shutting
725    /// down the filesystem, and the filesystem component if necessary.
726    pub fn take_exposed_dir(mut self) -> fio::DirectoryProxy {
727        self.component.take().expect("BUG: missing component").forget();
728        self.exposed_dir.take().expect("BUG: exposed dir missing")
729    }
730
731    /// Returns a list of volumes found in the filesystem.
732    pub async fn list_volumes(&self) -> Result<Vec<String>, Error> {
733        let volumes_dir = fuchsia_fs::directory::open_async::<fio::DirectoryMarker>(
734            self.exposed_dir(),
735            "volumes",
736            fio::PERM_READABLE,
737        )
738        .unwrap();
739        fuchsia_fs::directory::readdir(&volumes_dir)
740            .await
741            .map(|entries| entries.into_iter().map(|e| e.name).collect())
742            .map_err(|e| anyhow!("failed to read volumes dir: {}", e))
743    }
744
745    /// Returns the volume manager information.
746    pub async fn get_info(&self) -> Result<fblock::VolumeManagerInfo, Error> {
747        Ok(*connect_to_protocol_at_dir_root::<VolumesMarker>(self.exposed_dir())?
748            .get_info()
749            .await?
750            .map_err(|e| anyhow!(zx::Status::from_raw(e)))?
751            .ok_or_else(|| anyhow!("Missing info"))?)
752    }
753}
754
755impl Drop for ServingMultiVolumeFilesystem {
756    fn drop(&mut self) {
757        if let Some(exposed_dir) = self.exposed_dir.take() {
758            // Make a best effort attempt to shut down to the filesystem.
759            if let Ok(proxy) =
760                connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
761            {
762                let _ = proxy.shutdown();
763            }
764        }
765    }
766}
767
768#[cfg(test)]
769mod tests {
770    use super::*;
771    use crate::{Blobfs, F2fs, Fxfs, Minfs};
772    use delivery_blob::{CompressionMode, Type1Blob};
773    use fidl::endpoints::DiscoverableProtocolMarker;
774    use fidl_fuchsia_fxfs::{BlobCreatorMarker, BlobReaderMarker};
775    use ramdevice_client::RamdiskClient;
776    use std::io::{Read as _, Write as _};
777
778    async fn ramdisk(block_size: u64) -> RamdiskClient {
779        RamdiskClient::create(block_size, 1 << 16).await.unwrap()
780    }
781
782    async fn new_fs<FSC: FSConfig>(ramdisk: &RamdiskClient, config: FSC) -> Filesystem {
783        let block_dir = fuchsia_fs::directory::clone(ramdisk.outgoing()).unwrap();
784        let block_connector =
785            DirBasedBlockConnector::new(block_dir, format!("svc/{}", BlockMarker::PROTOCOL_NAME));
786        Filesystem::new(block_connector, config)
787    }
788
789    #[fuchsia::test]
790    async fn blobfs_custom_config() {
791        let block_size = 512;
792        let ramdisk = ramdisk(block_size).await;
793        let config = Blobfs { verbose: true, readonly: true, ..Default::default() };
794        let mut blobfs = new_fs(&ramdisk, config).await;
795
796        blobfs.format().await.expect("failed to format blobfs");
797        blobfs.fsck().await.expect("failed to fsck blobfs");
798        let _ = blobfs.serve().await.expect("failed to serve blobfs");
799    }
800
801    #[fuchsia::test]
802    async fn blobfs_format_fsck_success() {
803        let block_size = 512;
804        let ramdisk = ramdisk(block_size).await;
805        let mut blobfs = new_fs(&ramdisk, Blobfs::default()).await;
806
807        blobfs.format().await.expect("failed to format blobfs");
808        blobfs.fsck().await.expect("failed to fsck blobfs");
809    }
810
    // End-to-end lifecycle test: format, serve, write one blob, verify the
    // reported space usage, restart the filesystem, read the blob back, and
    // verify usage again.
    #[fuchsia::test]
    async fn blobfs_format_serve_write_query_restart_read_shutdown() {
        let block_size = 512;
        let ramdisk = ramdisk(block_size).await;
        let mut blobfs = new_fs(&ramdisk, Blobfs::default()).await;

        blobfs.format().await.expect("failed to format blobfs");

        let serving = blobfs.serve().await.expect("failed to serve blobfs the first time");

        // snapshot of FilesystemInfo
        let fs_info1 =
            serving.query().await.expect("failed to query filesystem info after first serving");

        // pre-generated merkle test fixture data
        let content = b"test content";
        let merkle = fuchsia_merkle::root_from_slice(content);
        let delivery_blob = Type1Blob::generate(content, CompressionMode::Never);

        // Write the blob via the BlobCreator protocol. The inner scope drops
        // the writer before we query usage below.
        {
            let creator = fuchsia_component_client::connect_to_protocol_at_dir_root::<
                BlobCreatorMarker,
            >(serving.exposed_dir())
            .unwrap();
            let writer = creator.create(&merkle.into(), false).await.unwrap().unwrap();
            let mut writer =
                blob_writer::BlobWriter::create(writer.into_proxy(), delivery_blob.len() as u64)
                    .await
                    .unwrap();
            writer.write(&delivery_blob).await.unwrap();
        }

        // check against the snapshot FilesystemInfo
        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
        assert_eq!(
            fs_info2.used_bytes - fs_info1.used_bytes,
            fs_info2.block_size as u64 // assuming content < 8K
        );

        // Restart blobfs to verify the blob persisted across a remount.
        serving.shutdown().await.expect("failed to shutdown blobfs the first time");
        let blobfs = new_fs(&ramdisk, Blobfs::default()).await;
        let serving = blobfs.serve().await.expect("failed to serve blobfs the second time");
        {
            let reader = fuchsia_component_client::connect_to_protocol_at_dir_root::<
                BlobReaderMarker,
            >(serving.exposed_dir())
            .unwrap();
            let vmo = reader.get_vmo(&merkle.into()).await.unwrap().unwrap();
            let read_content = vmo.read_to_vec::<u8>(0, content.len() as u64).unwrap();
            assert_eq!(read_content, content);
        }

        // once more check against the snapshot FilesystemInfo
        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
        assert_eq!(
            fs_info3.used_bytes - fs_info1.used_bytes,
            fs_info3.block_size as u64 // assuming content < 8K
        );

        serving.shutdown().await.expect("failed to shutdown blobfs the second time");
    }
872
    // Verifies that a served blobfs bound into the local namespace via
    // `bind_to_path` is visible through std::fs directory enumeration.
    #[fuchsia::test]
    async fn blobfs_bind_to_path() {
        let block_size = 512;
        let test_content = b"test content";
        let merkle = fuchsia_merkle::root_from_slice(test_content);
        let delivery_blob = Type1Blob::generate(test_content, CompressionMode::Never);
        let ramdisk = ramdisk(block_size).await;
        let mut blobfs = new_fs(&ramdisk, Blobfs::default()).await;

        blobfs.format().await.expect("failed to format blobfs");
        let mut serving = blobfs.serve().await.expect("failed to serve blobfs");
        serving.bind_to_path("/test-blobfs-path").expect("bind_to_path failed");

        // Write one blob through the BlobCreator protocol; the scope drops the
        // writer before the directory listing below.
        {
            let creator = fuchsia_component_client::connect_to_protocol_at_dir_root::<
                BlobCreatorMarker,
            >(serving.exposed_dir())
            .unwrap();
            let writer = creator.create(&merkle.into(), false).await.unwrap().unwrap();
            let mut writer =
                blob_writer::BlobWriter::create(writer.into_proxy(), delivery_blob.len() as u64)
                    .await
                    .unwrap();
            writer.write(&delivery_blob).await.unwrap();
        }

        // The blob should appear in the bound path, named by its merkle root.
        let entries = std::fs::read_dir("/test-blobfs-path")
            .unwrap()
            .map(|entry| entry.unwrap().file_name().into_string().unwrap())
            .collect::<Vec<_>>();
        assert_eq!(entries, &[merkle.to_string()]);

        serving.shutdown().await.expect("failed to shutdown blobfs");
    }
907
908    #[fuchsia::test]
909    async fn minfs_custom_config() {
910        let block_size = 512;
911        let ramdisk = ramdisk(block_size).await;
912        let config = Minfs {
913            verbose: true,
914            readonly: true,
915            fsck_after_every_transaction: true,
916            ..Default::default()
917        };
918        let mut minfs = new_fs(&ramdisk, config).await;
919
920        minfs.format().await.expect("failed to format minfs");
921        minfs.fsck().await.expect("failed to fsck minfs");
922        let _ = minfs.serve().await.expect("failed to serve minfs");
923    }
924
925    #[fuchsia::test]
926    async fn minfs_format_fsck_success() {
927        let block_size = 8192;
928        let ramdisk = ramdisk(block_size).await;
929        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;
930
931        minfs.format().await.expect("failed to format minfs");
932        minfs.fsck().await.expect("failed to fsck minfs");
933    }
934
935    #[fuchsia::test]
936    async fn minfs_format_serve_write_query_restart_read_shutdown() {
937        let block_size = 8192;
938        let ramdisk = ramdisk(block_size).await;
939        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;
940
941        minfs.format().await.expect("failed to format minfs");
942        let serving = minfs.serve().await.expect("failed to serve minfs the first time");
943
944        // snapshot of FilesystemInfo
945        let fs_info1 =
946            serving.query().await.expect("failed to query filesystem info after first serving");
947
948        let filename = "test_file";
949        let content = String::from("test content").into_bytes();
950
951        {
952            let test_file = fuchsia_fs::directory::open_file(
953                serving.root(),
954                filename,
955                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_WRITABLE,
956            )
957            .await
958            .expect("failed to create test file");
959            let _: u64 = test_file
960                .write(&content)
961                .await
962                .expect("failed to write to test file")
963                .map_err(Status::from_raw)
964                .expect("write error");
965        }
966
967        // check against the snapshot FilesystemInfo
968        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
969        assert_eq!(
970            fs_info2.used_bytes - fs_info1.used_bytes,
971            fs_info2.block_size as u64 // assuming content < 8K
972        );
973
974        serving.shutdown().await.expect("failed to shutdown minfs the first time");
975        let minfs = new_fs(&ramdisk, Minfs::default()).await;
976        let serving = minfs.serve().await.expect("failed to serve minfs the second time");
977
978        {
979            let test_file =
980                fuchsia_fs::directory::open_file(serving.root(), filename, fio::PERM_READABLE)
981                    .await
982                    .expect("failed to open test file");
983            let read_content =
984                fuchsia_fs::file::read(&test_file).await.expect("failed to read from test file");
985            assert_eq!(content, read_content);
986        }
987
988        // once more check against the snapshot FilesystemInfo
989        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
990        assert_eq!(
991            fs_info3.used_bytes - fs_info1.used_bytes,
992            fs_info3.block_size as u64 // assuming content < 8K
993        );
994
995        let _ = serving.shutdown().await.expect("failed to shutdown minfs the second time");
996    }
997
    // Verifies that a served minfs bound into the local namespace via
    // `bind_to_path` supports std::fs file I/O, and that the path is unbound
    // after shutdown.
    #[fuchsia::test]
    async fn minfs_bind_to_path() {
        let block_size = 8192;
        let test_content = b"test content";
        let ramdisk = ramdisk(block_size).await;
        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;

        minfs.format().await.expect("failed to format minfs");
        let mut serving = minfs.serve().await.expect("failed to serve minfs");
        serving.bind_to_path("/test-minfs-path").expect("bind_to_path failed");
        let test_path = "/test-minfs-path/test_file";

        // Write through std::fs; the scope closes the file handle.
        {
            let mut file = std::fs::File::create(test_path).expect("failed to create test file");
            file.write_all(test_content).expect("write bytes");
        }

        // Read the same file back through std::fs.
        {
            let mut file = std::fs::File::open(test_path).expect("failed to open test file");
            let mut buf = Vec::new();
            file.read_to_end(&mut buf).expect("failed to read test file");
            assert_eq!(buf, test_content);
        }

        serving.shutdown().await.expect("failed to shutdown minfs");

        // After shutdown the binding must be gone, so the open should fail.
        std::fs::File::open(test_path).expect_err("test file was not unbound");
    }
1026
1027    #[fuchsia::test]
1028    async fn minfs_take_exposed_dir_does_not_drop() {
1029        let block_size = 512;
1030        let test_content = b"test content";
1031        let test_file_name = "test-file";
1032        let ramdisk = ramdisk(block_size).await;
1033        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;
1034
1035        minfs.format().await.expect("failed to format fxfs");
1036
1037        let fs = minfs.serve().await.expect("failed to serve fxfs");
1038        let file = {
1039            let file = fuchsia_fs::directory::open_file(
1040                fs.root(),
1041                test_file_name,
1042                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_READABLE | fio::PERM_WRITABLE,
1043            )
1044            .await
1045            .unwrap();
1046            fuchsia_fs::file::write(&file, test_content).await.unwrap();
1047            file.close().await.expect("close fidl error").expect("close error");
1048            fuchsia_fs::directory::open_file(fs.root(), test_file_name, fio::PERM_READABLE)
1049                .await
1050                .unwrap()
1051        };
1052
1053        let exposed_dir = fs.take_exposed_dir();
1054
1055        assert_eq!(fuchsia_fs::file::read(&file).await.unwrap(), test_content);
1056
1057        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
1058            .expect("connecting to admin marker")
1059            .shutdown()
1060            .await
1061            .expect("shutdown failed");
1062    }
1063
1064    #[fuchsia::test]
1065    async fn f2fs_format_fsck_success() {
1066        let block_size = 4096;
1067        let ramdisk = ramdisk(block_size).await;
1068        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;
1069
1070        f2fs.format().await.expect("failed to format f2fs");
1071        f2fs.fsck().await.expect("failed to fsck f2fs");
1072    }
1073
    // End-to-end lifecycle test for f2fs: format, serve, write a file, verify
    // reported space usage, restart, read the file back, verify usage again,
    // and finally fsck the volume after the second shutdown.
    #[fuchsia::test]
    async fn f2fs_format_serve_write_query_restart_read_shutdown() {
        let block_size = 4096;
        let ramdisk = ramdisk(block_size).await;
        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;

        f2fs.format().await.expect("failed to format f2fs");
        let serving = f2fs.serve().await.expect("failed to serve f2fs the first time");

        // snapshot of FilesystemInfo
        let fs_info1 =
            serving.query().await.expect("failed to query filesystem info after first serving");

        let filename = "test_file";
        let content = String::from("test content").into_bytes();

        // Write the file; the scope closes the connection before querying.
        {
            let test_file = fuchsia_fs::directory::open_file(
                serving.root(),
                filename,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_WRITABLE,
            )
            .await
            .expect("failed to create test file");
            let _: u64 = test_file
                .write(&content)
                .await
                .expect("failed to write to test file")
                .map_err(Status::from_raw)
                .expect("write error");
        }

        // check against the snapshot FilesystemInfo
        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
        // With zx::stream, f2fs doesn't support the inline data feature allowing file
        // inode blocks to include small data. This way requires keeping two copies of VMOs
        // for the same inline data
        // assuming content < 4K and its inode block.
        let expected_size2 = fs_info2.block_size * 2;
        assert_eq!(fs_info2.used_bytes - fs_info1.used_bytes, expected_size2 as u64);

        // Restart f2fs to verify the file persisted across a remount.
        serving.shutdown().await.expect("failed to shutdown f2fs the first time");
        let f2fs = new_fs(&ramdisk, F2fs::default()).await;
        let serving = f2fs.serve().await.expect("failed to serve f2fs the second time");

        {
            let test_file =
                fuchsia_fs::directory::open_file(serving.root(), filename, fio::PERM_READABLE)
                    .await
                    .expect("failed to open test file");
            let read_content =
                fuchsia_fs::file::read(&test_file).await.expect("failed to read from test file");
            assert_eq!(content, read_content);
        }

        // once more check against the snapshot FilesystemInfo
        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
        // assuming content < 4K and its inode block.
        let expected_size3 = fs_info3.block_size * 2;
        assert_eq!(fs_info3.used_bytes - fs_info1.used_bytes, expected_size3 as u64);

        // Shut down once more and confirm the volume is still consistent.
        serving.shutdown().await.expect("failed to shutdown f2fs the second time");
        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;
        f2fs.fsck().await.expect("failed to fsck f2fs after shutting down the second time");
    }
1139
    // Verifies that a served f2fs bound into the local namespace via
    // `bind_to_path` supports std::fs file I/O, and that the path is unbound
    // after shutdown.
    #[fuchsia::test]
    async fn f2fs_bind_to_path() {
        let block_size = 4096;
        let test_content = b"test content";
        let ramdisk = ramdisk(block_size).await;
        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;

        f2fs.format().await.expect("failed to format f2fs");
        let mut serving = f2fs.serve().await.expect("failed to serve f2fs");
        serving.bind_to_path("/test-f2fs-path").expect("bind_to_path failed");
        let test_path = "/test-f2fs-path/test_file";

        // Write through std::fs; the scope closes the file handle.
        {
            let mut file = std::fs::File::create(test_path).expect("failed to create test file");
            file.write_all(test_content).expect("write bytes");
        }

        // Read the same file back through std::fs.
        {
            let mut file = std::fs::File::open(test_path).expect("failed to open test file");
            let mut buf = Vec::new();
            file.read_to_end(&mut buf).expect("failed to read test file");
            assert_eq!(buf, test_content);
        }

        serving.shutdown().await.expect("failed to shutdown f2fs");

        // After shutdown the binding must be gone, so the open should fail.
        std::fs::File::open(test_path).expect_err("test file was not unbound");
    }
1168
1169    #[fuchsia::test]
1170    async fn fxfs_open_volume() {
1171        let block_size = 512;
1172        let ramdisk = ramdisk(block_size).await;
1173        let mut fxfs = new_fs(&ramdisk, Fxfs::default()).await;
1174
1175        fxfs.format().await.expect("failed to format fxfs");
1176
1177        let fs = fxfs.serve_multi_volume().await.expect("failed to serve fxfs");
1178
1179        assert_eq!(fs.has_volume("foo").await.expect("has_volume"), false);
1180        assert!(
1181            fs.open_volume("foo", MountOptions::default()).await.is_err(),
1182            "Opening nonexistent volume should fail"
1183        );
1184
1185        let vol = fs
1186            .create_volume("foo", CreateOptions::default(), MountOptions::default())
1187            .await
1188            .expect("Create volume failed");
1189        vol.query().await.expect("Query volume failed");
1190        // TODO(https://fxbug.dev/42057878) Closing the volume is not synchronous. Immediately reopening the
1191        // volume will race with the asynchronous close and sometimes fail because the volume is
1192        // still mounted.
1193        // fs.open_volume("foo", MountOptions{crypt: None, as_blob: false}).await
1194        //    .expect("Open volume failed");
1195        assert_eq!(fs.has_volume("foo").await.expect("has_volume"), true);
1196    }
1197
    // Verifies that `take_exposed_dir` leaves fxfs running: a file opened in a
    // volume before the management struct is consumed must still be readable
    // after, and shutdown must still work through the taken exposed dir.
    #[fuchsia::test]
    async fn fxfs_take_exposed_dir_does_not_drop() {
        let block_size = 512;
        let test_content = b"test content";
        let test_file_name = "test-file";
        let ramdisk = ramdisk(block_size).await;
        let mut fxfs = new_fs(&ramdisk, Fxfs::default()).await;

        fxfs.format().await.expect("failed to format fxfs");

        let fs = fxfs.serve_multi_volume().await.expect("failed to serve fxfs");
        let file = {
            // Create a volume, write the file, then reopen it read-only so the
            // handle outlives `fs` below.
            let vol = fs
                .create_volume("foo", CreateOptions::default(), MountOptions::default())
                .await
                .expect("Create volume failed");
            let file = fuchsia_fs::directory::open_file(
                vol.root(),
                test_file_name,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .await
            .unwrap();
            fuchsia_fs::file::write(&file, test_content).await.unwrap();
            file.close().await.expect("close fidl error").expect("close error");
            fuchsia_fs::directory::open_file(vol.root(), test_file_name, fio::PERM_READABLE)
                .await
                .unwrap()
        };

        // Consuming the struct here must NOT shut the filesystem down.
        let exposed_dir = fs.take_exposed_dir();

        assert_eq!(fuchsia_fs::file::read(&file).await.unwrap(), test_content);

        // The caller is now responsible for shutdown, via the exposed dir.
        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            .expect("connecting to admin marker")
            .shutdown()
            .await
            .expect("shutdown failed");
    }
1238}