// blobfs_ramdisk/lib.rs

1// Copyright 2019 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#![deny(missing_docs)]
6#![allow(clippy::let_unit_value)]
7
8//! Test utilities for starting a blobfs server.
9
10use anyhow::{anyhow, Context as _, Error};
11use delivery_blob::{delivery_blob_path, CompressionMode, Type1Blob};
12use fidl::endpoints::ClientEnd;
13use fidl_fuchsia_fs_startup::{CreateOptions, MountOptions};
14use fuchsia_merkle::Hash;
15use std::borrow::Cow;
16use std::collections::BTreeSet;
17use {fidl_fuchsia_fxfs as ffxfs, fidl_fuchsia_io as fio};
18
19const RAMDISK_BLOCK_SIZE: u64 = 512;
20static FXFS_BLOB_VOLUME_NAME: &str = "blob";
21
22#[cfg(test)]
23mod test;
24
/// A blob's hash, length, and contents.
#[derive(Debug, Clone)]
pub struct BlobInfo {
    // Merkle root hash identifying the blob, computed from `contents` in `From<B>`.
    merkle: Hash,
    // Uncompressed blob bytes. `Cow` lets callers supply either a `&'static [u8]`
    // or an owned `Vec<u8>` without copying.
    contents: Cow<'static, [u8]>,
}
31
32impl<B> From<B> for BlobInfo
33where
34    B: Into<Cow<'static, [u8]>>,
35{
36    fn from(bytes: B) -> Self {
37        let bytes = bytes.into();
38        Self { merkle: fuchsia_merkle::from_slice(&bytes).root(), contents: bytes }
39    }
40}
41
/// A helper to construct [`BlobfsRamdisk`] instances.
pub struct BlobfsRamdiskBuilder {
    // Caller-supplied backing ramdisk; `start()` creates a fresh one when `None`.
    ramdisk: Option<SuppliedRamdisk>,
    // Blobs to write (if not already present) after the filesystem is mounted.
    blobs: Vec<BlobInfo>,
    // Which blob filesystem implementation to serve; defaults to `CppBlobfs`.
    implementation: Implementation,
}
48
/// A caller-supplied backing ramdisk, tracking whether it already contains a formatted
/// blob filesystem (which determines whether `start()` needs to format it).
enum SuppliedRamdisk {
    /// Already formatted with a blob filesystem; existing contents are preserved.
    Formatted(FormattedRamdisk),
    /// Raw ramdisk that will be formatted before serving.
    Unformatted(Ramdisk),
}
53
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// The blob filesystem implementation to use.
// The variants correspond to ServingFilesystem::SingleVolume / ::MultiVolume respectively.
pub enum Implementation {
    /// The older C++ implementation.
    CppBlobfs,
    /// The newer Rust implementation that uses FxFs.
    Fxblob,
}
62
63impl Implementation {
64    /// The production blobfs implementation (and downstream decisions like whether pkg-cache
65    /// should use /blob or fuchsia.fxfs/BlobCreator to write blobs) is determined by a GN
66    /// variable. This function returns the implementation determined by said GN variable, so that
67    /// clients inheriting production configs can create a BlobfsRamdisk backed by the appropriate
68    /// implementation.
69    pub fn from_env() -> Self {
70        match env!("FXFS_BLOB") {
71            "true" => Self::Fxblob,
72            "false" => Self::CppBlobfs,
73            other => panic!("unexpected value for env var 'FXFS_BLOB': {other}"),
74        }
75    }
76}
77
impl BlobfsRamdiskBuilder {
    // Default configuration: no supplied ramdisk, no pre-loaded blobs, C++ blobfs.
    fn new() -> Self {
        Self { ramdisk: None, blobs: vec![], implementation: Implementation::CppBlobfs }
    }

    /// Configures this blobfs to use the already formatted given backing ramdisk.
    pub fn formatted_ramdisk(self, ramdisk: FormattedRamdisk) -> Self {
        Self { ramdisk: Some(SuppliedRamdisk::Formatted(ramdisk)), ..self }
    }

    /// Configures this blobfs to use the supplied unformatted ramdisk.
    pub fn ramdisk(self, ramdisk: Ramdisk) -> Self {
        Self { ramdisk: Some(SuppliedRamdisk::Unformatted(ramdisk)), ..self }
    }

    /// Write the provided blob after mounting blobfs if the blob does not already exist.
    pub fn with_blob(mut self, blob: impl Into<BlobInfo>) -> Self {
        self.blobs.push(blob.into());
        self
    }

    /// Use the blobfs implementation of the blob file system (the older C++ implementation that
    /// provides a fuchsia.io interface).
    pub fn cpp_blobfs(self) -> Self {
        Self { implementation: Implementation::CppBlobfs, ..self }
    }

    /// Use the fxblob implementation of the blob file system (the newer Rust implementation built
    /// on fxfs that has a custom FIDL interface).
    pub fn fxblob(self) -> Self {
        Self { implementation: Implementation::Fxblob, ..self }
    }

    /// Use the provided blobfs implementation.
    pub fn implementation(self, implementation: Implementation) -> Self {
        Self { implementation, ..self }
    }

    /// Use the blobfs implementation that would be active in the production configuration.
    pub fn impl_from_env(self) -> Self {
        self.implementation(Implementation::from_env())
    }

    /// Starts a blobfs server with the current configuration options.
    pub async fn start(self) -> Result<BlobfsRamdisk, Error> {
        let Self { ramdisk, blobs, implementation } = self;
        // Reuse the supplied ramdisk if any, otherwise create a fresh one. Only a
        // pre-formatted ramdisk skips the format step below.
        let (ramdisk, needs_format) = match ramdisk {
            Some(SuppliedRamdisk::Formatted(FormattedRamdisk(ramdisk))) => (ramdisk, false),
            Some(SuppliedRamdisk::Unformatted(ramdisk)) => (ramdisk, true),
            None => (Ramdisk::start().await.context("creating backing ramdisk for blobfs")?, true),
        };

        let ramdisk_controller = ramdisk.client.open_controller()?.into_proxy();

        // Spawn blobfs on top of the ramdisk.
        let mut fs = match implementation {
            Implementation::CppBlobfs => fs_management::filesystem::Filesystem::new(
                ramdisk_controller,
                fs_management::Blobfs { ..fs_management::Blobfs::dynamic_child() },
            ),
            Implementation::Fxblob => fs_management::filesystem::Filesystem::new(
                ramdisk_controller,
                fs_management::Fxfs::default(),
            ),
        };
        if needs_format {
            let () = fs.format().await.context("formatting ramdisk")?;
        }

        // CppBlobfs serves directly from the single-volume filesystem; Fxblob serves from a
        // dedicated blob volume (FXFS_BLOB_VOLUME_NAME) inside the multi-volume filesystem,
        // which must be created on first use and opened thereafter.
        let fs = match implementation {
            Implementation::CppBlobfs => ServingFilesystem::SingleVolume(
                fs.serve().await.context("serving single volume filesystem")?,
            ),
            Implementation::Fxblob => {
                let mut fs =
                    fs.serve_multi_volume().await.context("serving multi volume filesystem")?;
                if needs_format {
                    let _: &mut fs_management::filesystem::ServingVolume = fs
                        .create_volume(
                            FXFS_BLOB_VOLUME_NAME,
                            CreateOptions::default(),
                            MountOptions { as_blob: Some(true), ..MountOptions::default() },
                        )
                        .await
                        .context("creating blob volume")?;
                } else {
                    let _: &mut fs_management::filesystem::ServingVolume = fs
                        .open_volume(
                            FXFS_BLOB_VOLUME_NAME,
                            MountOptions { as_blob: Some(true), ..MountOptions::default() },
                        )
                        .await
                        .context("opening blob volume")?;
                }
                ServingFilesystem::MultiVolume(fs)
            }
        };

        let blobfs = BlobfsRamdisk { backing_ramdisk: FormattedRamdisk(ramdisk), fs };

        // Write all the requested missing blobs to the mounted filesystem.
        if !blobs.is_empty() {
            let mut present_blobs = blobfs.list_blobs()?;

            for blob in blobs {
                // Skip blobs already on disk (e.g. when restarting on a formatted ramdisk
                // or when the same blob was passed to with_blob twice).
                if present_blobs.contains(&blob.merkle) {
                    continue;
                }
                blobfs
                    .write_blob(blob.merkle, &blob.contents)
                    .await
                    .context(format!("writing {}", blob.merkle))?;
                present_blobs.insert(blob.merkle);
            }
        }

        Ok(blobfs)
    }
}
197
/// A ramdisk-backed blobfs instance
pub struct BlobfsRamdisk {
    // The formatted ramdisk the filesystem is served from; returned by `unmount()`.
    backing_ramdisk: FormattedRamdisk,
    // The running filesystem (either C++ blobfs or fxblob).
    fs: ServingFilesystem,
}
203
/// The old blobfs can only be served out of a single volume filesystem, but the new fxblob can
/// only be served out of a multi volume filesystem (which we create with just a single volume
/// with the name coming from `FXFS_BLOB_VOLUME_NAME`). This enum allows `BlobfsRamdisk` to
/// wrap either blobfs or fxblob.
enum ServingFilesystem {
    /// The C++ blobfs implementation (`Implementation::CppBlobfs`).
    SingleVolume(fs_management::filesystem::ServingSingleVolumeFilesystem),
    /// The fxblob implementation (`Implementation::Fxblob`).
    MultiVolume(fs_management::filesystem::ServingMultiVolumeFilesystem),
}
212
213impl ServingFilesystem {
214    async fn shutdown(self) -> Result<(), Error> {
215        match self {
216            Self::SingleVolume(fs) => fs.shutdown().await.context("shutting down single volume"),
217            Self::MultiVolume(fs) => fs.shutdown().await.context("shutting down multi volume"),
218        }
219    }
220
221    fn exposed_dir(&self) -> Result<&fio::DirectoryProxy, Error> {
222        match self {
223            Self::SingleVolume(fs) => Ok(fs.exposed_dir()),
224            Self::MultiVolume(fs) => Ok(fs
225                .volume(FXFS_BLOB_VOLUME_NAME)
226                .ok_or(anyhow!("missing blob volume"))?
227                .exposed_dir()),
228        }
229    }
230
231    /// The name of the blob root directory in the exposed directory.
232    fn blob_dir_name(&self) -> &'static str {
233        match self {
234            Self::SingleVolume(_) => "blob-exec",
235            Self::MultiVolume(_) => "root",
236        }
237    }
238
239    /// None if the filesystem does not expose any services.
240    fn svc_dir(&self) -> Result<Option<fio::DirectoryProxy>, Error> {
241        match self {
242            Self::SingleVolume(_) => Ok(None),
243            Self::MultiVolume(_) => Ok(Some(
244                fuchsia_fs::directory::open_directory_async(
245                    self.exposed_dir()?,
246                    "svc",
247                    fio::PERM_READABLE,
248                )
249                .context("opening svc dir")?,
250            )),
251        }
252    }
253
254    /// None if the filesystem does not support the API.
255    fn blob_creator_proxy(&self) -> Result<Option<ffxfs::BlobCreatorProxy>, Error> {
256        Ok(match self.svc_dir()? {
257            Some(d) => Some(
258                fuchsia_component::client::connect_to_protocol_at_dir_root::<
259                    ffxfs::BlobCreatorMarker,
260                >(&d)
261                .context("connecting to fuchsia.fxfs.BlobCreator")?,
262            ),
263            None => None,
264        })
265    }
266
267    /// None if the filesystem does not support the API.
268    fn blob_reader_proxy(&self) -> Result<Option<ffxfs::BlobReaderProxy>, Error> {
269        Ok(match self.svc_dir()? {
270            Some(d) => {
271                Some(
272                    fuchsia_component::client::connect_to_protocol_at_dir_root::<
273                        ffxfs::BlobReaderMarker,
274                    >(&d)
275                    .context("connecting to fuchsia.fxfs.BlobReader")?,
276                )
277            }
278            None => None,
279        })
280    }
281
282    fn implementation(&self) -> Implementation {
283        match self {
284            Self::SingleVolume(_) => Implementation::CppBlobfs,
285            Self::MultiVolume(_) => Implementation::Fxblob,
286        }
287    }
288}
289
290impl BlobfsRamdisk {
291    /// Creates a new [`BlobfsRamdiskBuilder`] with no pre-configured ramdisk.
292    pub fn builder() -> BlobfsRamdiskBuilder {
293        BlobfsRamdiskBuilder::new()
294    }
295
296    /// Starts a blobfs server backed by a freshly formatted ramdisk.
297    pub async fn start() -> Result<Self, Error> {
298        Self::builder().start().await
299    }
300
301    /// Returns a new connection to blobfs using the blobfs::Client wrapper type.
302    ///
303    /// # Panics
304    ///
305    /// Panics on error
306    pub fn client(&self) -> blobfs::Client {
307        blobfs::Client::new(
308            self.root_dir_proxy().unwrap(),
309            self.blob_creator_proxy().unwrap(),
310            self.blob_reader_proxy().unwrap(),
311            None,
312        )
313        .unwrap()
314    }
315
316    /// Returns a new connection to blobfs's root directory as a raw zircon channel.
317    pub fn root_dir_handle(&self) -> Result<ClientEnd<fio::DirectoryMarker>, Error> {
318        let (root_clone, server_end) = zx::Channel::create();
319        self.fs.exposed_dir()?.open(
320            self.fs.blob_dir_name(),
321            fio::PERM_READABLE | fio::Flags::PERM_INHERIT_WRITE | fio::Flags::PERM_EXECUTE,
322            &Default::default(),
323            server_end,
324        )?;
325        Ok(root_clone.into())
326    }
327
328    /// Returns a new connection to blobfs's root directory as a DirectoryProxy.
329    pub fn root_dir_proxy(&self) -> Result<fio::DirectoryProxy, Error> {
330        Ok(self.root_dir_handle()?.into_proxy())
331    }
332
333    /// Returns a new connection to blobfs's root directory as a openat::Dir.
334    pub fn root_dir(&self) -> Result<openat::Dir, Error> {
335        use std::os::fd::{FromRawFd as _, IntoRawFd as _, OwnedFd};
336
337        let fd: OwnedFd =
338            fdio::create_fd(self.root_dir_handle()?.into()).context("failed to create fd")?;
339
340        // SAFETY: `openat::Dir` requires that the file descriptor is a directory, which we are
341        // guaranteed because `root_dir_handle()` implements the directory FIDL interface. There is
342        // not a direct way to transfer ownership from an `OwnedFd` to `openat::Dir`, so we need to
343        // convert the fd into a `RawFd` before handing it off to `Dir`.
344        unsafe { Ok(openat::Dir::from_raw_fd(fd.into_raw_fd())) }
345    }
346
347    /// Signals blobfs to unmount and waits for it to exit cleanly, returning a new
348    /// [`BlobfsRamdiskBuilder`] initialized with the ramdisk.
349    pub async fn into_builder(self) -> Result<BlobfsRamdiskBuilder, Error> {
350        let implementation = self.fs.implementation();
351        let ramdisk = self.unmount().await?;
352        Ok(Self::builder().formatted_ramdisk(ramdisk).implementation(implementation))
353    }
354
355    /// Signals blobfs to unmount and waits for it to exit cleanly, returning the backing Ramdisk.
356    pub async fn unmount(self) -> Result<FormattedRamdisk, Error> {
357        self.fs.shutdown().await?;
358        Ok(self.backing_ramdisk)
359    }
360
361    /// Signals blobfs to unmount and waits for it to exit cleanly, stopping the inner ramdisk.
362    pub async fn stop(self) -> Result<(), Error> {
363        self.unmount().await?.stop().await
364    }
365
366    /// Returns a sorted list of all blobs present in this blobfs instance.
367    pub fn list_blobs(&self) -> Result<BTreeSet<Hash>, Error> {
368        self.root_dir()?
369            .list_dir(".")?
370            .map(|entry| {
371                Ok(entry?
372                    .file_name()
373                    .to_str()
374                    .ok_or_else(|| anyhow!("expected valid utf-8"))?
375                    .parse()?)
376            })
377            .collect()
378    }
379
380    /// Writes the blob to blobfs.
381    pub async fn add_blob_from(
382        &self,
383        merkle: Hash,
384        mut source: impl std::io::Read,
385    ) -> Result<(), Error> {
386        let mut bytes = vec![];
387        source.read_to_end(&mut bytes)?;
388        self.write_blob(merkle, &bytes).await
389    }
390
391    /// Writes a blob with hash `merkle` and blob contents `bytes` to blobfs. `bytes` should be
392    /// uncompressed. Ignores AlreadyExists errors.
393    pub async fn write_blob(&self, merkle: Hash, bytes: &[u8]) -> Result<(), Error> {
394        let compressed_data = Type1Blob::generate(bytes, CompressionMode::Attempt);
395        match self.fs {
396            ServingFilesystem::SingleVolume(_) => {
397                use std::io::Write as _;
398                let mut file =
399                    match self.root_dir().unwrap().new_file(delivery_blob_path(merkle), 0o600) {
400                        Ok(file) => file,
401                        Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
402                            // blob is being written or already written
403                            return Ok(());
404                        }
405                        Err(e) => {
406                            return Err(e.into());
407                        }
408                    };
409                file.set_len(compressed_data.len().try_into().unwrap())?;
410                file.write_all(&compressed_data)?;
411            }
412            ServingFilesystem::MultiVolume(_) => {
413                let blob_creator = self.blob_creator_proxy()?.ok_or_else(|| {
414                    anyhow!("The filesystem does not expose the BlobCreator service")
415                })?;
416                let writer_client_end = match blob_creator.create(&merkle.into(), false).await? {
417                    Ok(writer_client_end) => writer_client_end,
418                    Err(ffxfs::CreateBlobError::AlreadyExists) => {
419                        return Ok(());
420                    }
421                    Err(e) => {
422                        return Err(anyhow!("create blob error {:?}", e));
423                    }
424                };
425                let writer = writer_client_end.into_proxy();
426                let mut blob_writer =
427                    blob_writer::BlobWriter::create(writer, compressed_data.len() as u64)
428                        .await
429                        .context("failed to create BlobWriter")?;
430                blob_writer.write(&compressed_data).await?;
431            }
432        }
433        Ok(())
434    }
435
436    /// Returns a connection to blobfs's exposed "svc" directory, or None if the
437    /// implementation does not expose any services.
438    /// More convenient than using `blob_creator_proxy` directly when forwarding the service
439    /// to RealmBuilder components.
440    pub fn svc_dir(&self) -> Result<Option<fio::DirectoryProxy>, Error> {
441        self.fs.svc_dir()
442    }
443
444    /// Returns a new connection to blobfs's fuchsia.fxfs/BlobCreator API, or None if the
445    /// implementation does not support it.
446    pub fn blob_creator_proxy(&self) -> Result<Option<ffxfs::BlobCreatorProxy>, Error> {
447        self.fs.blob_creator_proxy()
448    }
449
450    /// Returns a new connection to blobfs's fuchsia.fxfs/BlobReader API, or None if the
451    /// implementation does not support it.
452    pub fn blob_reader_proxy(&self) -> Result<Option<ffxfs::BlobReaderProxy>, Error> {
453        self.fs.blob_reader_proxy()
454    }
455}
456
/// A helper to construct [`Ramdisk`] instances.
pub struct RamdiskBuilder {
    // Number of RAMDISK_BLOCK_SIZE-byte blocks; defaults to 1 << 20 (see `new`).
    block_count: u64,
}
461
462impl RamdiskBuilder {
463    fn new() -> Self {
464        Self { block_count: 1 << 20 }
465    }
466
467    /// Set the block count of the [`Ramdisk`].
468    pub fn block_count(mut self, block_count: u64) -> Self {
469        self.block_count = block_count;
470        self
471    }
472
473    /// Starts a new ramdisk.
474    pub async fn start(self) -> Result<Ramdisk, Error> {
475        let client = ramdevice_client::RamdiskClient::builder(RAMDISK_BLOCK_SIZE, self.block_count);
476        let client = client.build().await?;
477        Ok(Ramdisk { client })
478    }
479
480    /// Create a [`BlobfsRamdiskBuilder`] that uses this as its backing ramdisk.
481    pub async fn into_blobfs_builder(self) -> Result<BlobfsRamdiskBuilder, Error> {
482        Ok(BlobfsRamdiskBuilder::new().ramdisk(self.start().await?))
483    }
484}
485
/// A virtual memory-backed block device.
pub struct Ramdisk {
    // Handle to the underlying ramdisk device; consumed by `stop()` via `destroy()`.
    client: ramdevice_client::RamdiskClient,
}
490
// FormattedRamdisk Derefs to Ramdisk, which is only safe if all of the &self Ramdisk methods
// preserve the blobfs formatting.
impl Ramdisk {
    /// Create a RamdiskBuilder that defaults to 1024 * 1024 blocks of size 512 bytes.
    pub fn builder() -> RamdiskBuilder {
        RamdiskBuilder::new()
    }

    /// Starts a new ramdisk with 1024 * 1024 blocks and a block size of 512 bytes, resulting in a
    /// drive with 512MiB capacity.
    pub async fn start() -> Result<Self, Error> {
        Self::builder().start().await
    }

    /// Shuts down this ramdisk.
    // Takes `self` by value, so it cannot be reached through FormattedRamdisk's Deref
    // (which only yields `&Ramdisk`), keeping the formatting invariant above intact.
    pub async fn stop(self) -> Result<(), Error> {
        self.client.destroy().await
    }
}
510
/// A [`Ramdisk`] formatted for use by blobfs.
// Newtype wrapper: the type system tracks "already formatted" so BlobfsRamdiskBuilder
// knows whether it must format the disk before serving.
pub struct FormattedRamdisk(Ramdisk);
513
// This is safe as long as all of the &self methods of Ramdisk maintain the blobfs formatting.
// (Only shared access is exposed; consuming methods like `stop` are re-declared on
// FormattedRamdisk itself.)
impl std::ops::Deref for FormattedRamdisk {
    type Target = Ramdisk;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
521
impl FormattedRamdisk {
    /// Shuts down this ramdisk.
    // Delegates to Ramdisk::stop by value; this can't go through Deref, which only
    // provides a shared reference.
    pub async fn stop(self) -> Result<(), Error> {
        self.0.stop().await
    }
}
528
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write as _;

    // Smoke test: mount and cleanly unmount an empty blobfs.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn clean_start_and_stop() {
        let blobfs = BlobfsRamdisk::start().await.unwrap();

        let proxy = blobfs.root_dir_proxy().unwrap();
        drop(proxy);

        blobfs.stop().await.unwrap();
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn clean_start_contains_no_blobs() {
        let blobfs = BlobfsRamdisk::start().await.unwrap();

        assert_eq!(blobfs.list_blobs().unwrap(), BTreeSet::new());

        blobfs.stop().await.unwrap();
    }

    // Checks the various From conversions accepted by BlobInfo and with_blob.
    #[test]
    fn blob_info_conversions() {
        let a = BlobInfo::from(&b"static slice"[..]);
        let b = BlobInfo::from(b"owned vec".to_vec());
        let c = BlobInfo::from(Cow::from(&b"cow"[..]));
        assert_ne!(a.merkle, b.merkle);
        assert_ne!(b.merkle, c.merkle);
        assert_eq!(a.merkle, fuchsia_merkle::from_slice(&b"static slice"[..]).root());

        // Verify the following calling patterns build, but don't bother building the ramdisk.
        let _ = BlobfsRamdisk::builder()
            .with_blob(&b"static slice"[..])
            .with_blob(b"owned vec".to_vec())
            .with_blob(Cow::from(&b"cow"[..]));
    }

    // with_blob should skip blobs that are already present, both within one start()
    // and across a remount.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn with_blob_ignores_duplicates() {
        let blob = BlobInfo::from(&b"duplicate"[..]);

        let blobfs = BlobfsRamdisk::builder()
            .with_blob(blob.clone())
            .with_blob(blob.clone())
            .start()
            .await
            .unwrap();
        assert_eq!(blobfs.list_blobs().unwrap(), BTreeSet::from([blob.merkle]));

        let blobfs =
            blobfs.into_builder().await.unwrap().with_blob(blob.clone()).start().await.unwrap();
        assert_eq!(blobfs.list_blobs().unwrap(), BTreeSet::from([blob.merkle]));
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn build_with_two_blobs() {
        let blobfs = BlobfsRamdisk::builder()
            .with_blob(&b"blob 1"[..])
            .with_blob(&b"blob 2"[..])
            .start()
            .await
            .unwrap();

        let expected = BTreeSet::from([
            fuchsia_merkle::from_slice(&b"blob 1"[..]).root(),
            fuchsia_merkle::from_slice(&b"blob 2"[..]).root(),
        ]);
        assert_eq!(expected.len(), 2);
        assert_eq!(blobfs.list_blobs().unwrap(), expected);

        blobfs.stop().await.unwrap();
    }

    // Blobs written before unmount survive a remount (C++ blobfs).
    #[fuchsia_async::run_singlethreaded(test)]
    async fn blobfs_remount() {
        let blobfs =
            BlobfsRamdisk::builder().cpp_blobfs().with_blob(&b"test"[..]).start().await.unwrap();
        let blobs = blobfs.list_blobs().unwrap();

        let blobfs = blobfs.into_builder().await.unwrap().start().await.unwrap();

        assert_eq!(blobs, blobfs.list_blobs().unwrap());

        blobfs.stop().await.unwrap();
    }

    // Blobs written before unmount survive a remount (fxblob).
    #[fuchsia_async::run_singlethreaded(test)]
    async fn fxblob_remount() {
        let blobfs =
            BlobfsRamdisk::builder().fxblob().with_blob(&b"test"[..]).start().await.unwrap();
        let blobs = blobfs.list_blobs().unwrap();

        let blobfs = blobfs.into_builder().await.unwrap().start().await.unwrap();

        assert_eq!(blobs, blobfs.list_blobs().unwrap());

        blobfs.stop().await.unwrap();
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn blob_appears_in_readdir() {
        let blobfs = BlobfsRamdisk::start().await.unwrap();
        let root = blobfs.root_dir().unwrap();

        let hello_merkle = write_blob(&root, "Hello blobfs!".as_bytes());
        assert_eq!(list_blobs(&root), vec![hello_merkle]);

        drop(root);
        blobfs.stop().await.unwrap();
    }

    /// Writes a blob to blobfs, returning the computed merkle root of the blob.
    #[allow(clippy::zero_prefixed_literal)]
    fn write_blob(dir: &openat::Dir, payload: &[u8]) -> String {
        let merkle = fuchsia_merkle::from_slice(payload).root().to_string();
        // Deliver as a Type 1 (compressed) delivery blob, as production writers do.
        let compressed_data = Type1Blob::generate(payload, CompressionMode::Always);
        let mut f = dir.new_file(delivery_blob_path(&merkle), 0600).unwrap();
        f.set_len(compressed_data.len() as u64).unwrap();
        f.write_all(&compressed_data).unwrap();

        merkle
    }

    /// Returns an unsorted list of blobs in the given blobfs dir.
    fn list_blobs(dir: &openat::Dir) -> Vec<String> {
        dir.list_dir(".")
            .unwrap()
            .map(|entry| entry.unwrap().file_name().to_owned().into_string().unwrap())
            .collect()
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn ramdisk_builder_sets_block_count() {
        for block_count in [1, 2, 3, 16] {
            let ramdisk = Ramdisk::builder().block_count(block_count).start().await.unwrap();
            let client_end = ramdisk.client.open().unwrap();
            let proxy = client_end.into_proxy();
            let info = proxy.get_info().await.unwrap().unwrap();
            assert_eq!(info.block_count, block_count);
        }
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn ramdisk_into_blobfs_formats_ramdisk() {
        let _: BlobfsRamdisk =
            Ramdisk::builder().into_blobfs_builder().await.unwrap().start().await.unwrap();
    }

    // The C++ implementation serves blobs only over fuchsia.io, not the fxfs FIDL APIs.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn blobfs_does_not_support_blob_creator_api() {
        let blobfs = BlobfsRamdisk::builder().cpp_blobfs().start().await.unwrap();

        assert!(blobfs.blob_creator_proxy().unwrap().is_none());

        blobfs.stop().await.unwrap();
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn blobfs_does_not_support_blob_reader_api() {
        let blobfs = BlobfsRamdisk::builder().cpp_blobfs().start().await.unwrap();

        assert!(blobfs.blob_reader_proxy().unwrap().is_none());

        blobfs.stop().await.unwrap();
    }

    #[fuchsia_async::run_singlethreaded(test)]
    async fn fxblob_read_and_write() {
        let blobfs = BlobfsRamdisk::builder().fxblob().start().await.unwrap();
        let root = blobfs.root_dir().unwrap();

        assert_eq!(list_blobs(&root), Vec::<String>::new());
        let data = "Hello blobfs!".as_bytes();
        let merkle = fuchsia_merkle::from_slice(data).root();
        blobfs.write_blob(merkle, data).await.unwrap();

        assert_eq!(list_blobs(&root), vec![merkle.to_string()]);

        drop(root);
        blobfs.stop().await.unwrap();
    }

    // Write a blob through the raw fuchsia.fxfs.BlobCreator API and confirm it lands.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn fxblob_blob_creator_api() {
        let blobfs = BlobfsRamdisk::builder().fxblob().start().await.unwrap();
        let root = blobfs.root_dir().unwrap();
        assert_eq!(list_blobs(&root), Vec::<String>::new());

        let bytes = [1u8; 40];
        let hash = fuchsia_merkle::from_slice(&bytes).root();
        let compressed_data = Type1Blob::generate(&bytes, CompressionMode::Always);

        let blob_creator = blobfs.blob_creator_proxy().unwrap().unwrap();
        let blob_writer = blob_creator.create(&hash, false).await.unwrap().unwrap();
        let mut blob_writer =
            blob_writer::BlobWriter::create(blob_writer.into_proxy(), compressed_data.len() as u64)
                .await
                .unwrap();
        let () = blob_writer.write(&compressed_data).await.unwrap();

        assert_eq!(list_blobs(&root), vec![hash.to_string()]);

        drop(root);
        blobfs.stop().await.unwrap();
    }

    // Read a blob back through fuchsia.fxfs.BlobReader and compare against the original bytes.
    #[fuchsia_async::run_singlethreaded(test)]
    async fn fxblob_blob_reader_api() {
        let data = "Hello blobfs!".as_bytes();
        let hash = fuchsia_merkle::from_slice(data).root();
        let blobfs = BlobfsRamdisk::builder().fxblob().with_blob(data).start().await.unwrap();

        let root = blobfs.root_dir().unwrap();
        assert_eq!(list_blobs(&root), vec![hash.to_string()]);

        let blob_reader = blobfs.blob_reader_proxy().unwrap().unwrap();
        let vmo = blob_reader.get_vmo(&hash.into()).await.unwrap().unwrap();
        let mut buf = vec![0; vmo.get_content_size().unwrap() as usize];
        let () = vmo.read(&mut buf, 0).unwrap();
        assert_eq!(buf, data);

        drop(root);
        blobfs.stop().await.unwrap();
    }
}