// blackout_target/random_op.rs

1// Copyright 2025 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Load generator for mutable filesystems which runs random operations.
6
7use anyhow::{Context, Result};
8use fidl_fuchsia_io as fio;
9use rand::rngs::StdRng;
10use rand::seq::SliceRandom;
11use rand::{distributions, Rng, SeedableRng};
12use std::sync::atomic::{AtomicU64, Ordering};
13
// Arbitrary maximum file size (64 KiB) just to put a cap on it; all random
// offsets and lengths below are drawn from within this bound.
const BLACKOUT_MAX_FILE_SIZE: usize = 1 << 16;
16
/// Operations that can be performed.
#[derive(Clone, Copy, Debug)]
pub enum Op {
    /// Extend the file with fallocate.
    Allocate,
    /// Write random bytes at a random offset in the file.
    Write,
    /// Truncate the file to a random size (this may shrink or extend it).
    Truncate,
    /// Close the connection to this file and reopen it. This effectively forces the handle to
    /// flush.
    Reopen,
    /// Delete this file and make a new random one.
    Replace,
}
32
/// A trait to allow clients to specify their own sample rates (or disable certain operations).
pub trait OpSampler {
    /// Picks a random Op using `rng`. Implementations control the relative
    /// probability of each variant (returning some variants never disables them).
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Op;
}
38
// In-memory model of one on-disk file; the load loop keeps `contents` in sync
// with every operation it performs so a scan can verify the filesystem.
struct File {
    // Name within the target directory (a monotonically increasing counter, as a string).
    name: String,
    // Expected contents of the on-disk file.
    contents: Vec<u8>,
    // Object id reported by the filesystem when the file was created, if available.
    oid: Option<u64>,
    // Open connection to the file; `None` only transiently during reopen/replace.
    proxy: Option<fio::FileProxy>,
}
45
46impl std::fmt::Display for File {
47    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
48        write!(f, "file '{}' (current len: {}) ", self.name, self.contents.len())?;
49        if let Some(oid) = self.oid {
50            write!(f, "oid: {}", oid)?;
51        }
52        Ok(())
53    }
54}
55
56impl distributions::Distribution<File> for distributions::Standard {
57    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> File {
58        static NAME: AtomicU64 = AtomicU64::new(0);
59        let size = rng.gen_range(1..BLACKOUT_MAX_FILE_SIZE);
60        let mut contents = vec![0; size];
61        rng.fill(contents.as_mut_slice());
62        let name = NAME.fetch_add(1, Ordering::Relaxed);
63        File { name: name.to_string(), contents, oid: None, proxy: None }
64    }
65}
66
67impl File {
68    // Create this file on disk in the given directory.
69    async fn create(&mut self, dir: &fio::DirectoryProxy) -> Result<()> {
70        let file = fuchsia_fs::directory::open_file(
71            dir,
72            &self.name,
73            fio::PERM_READABLE | fio::PERM_WRITABLE | fio::Flags::FLAG_MUST_CREATE,
74        )
75        .await?;
76        fuchsia_fs::file::write(&file, &self.contents).await?;
77        let (_, attrs) = file
78            .get_attributes(fio::NodeAttributesQuery::ID)
79            .await?
80            .map_err(zx::Status::from_raw)?;
81        self.oid = attrs.id;
82        self.proxy = Some(file);
83        Ok(())
84    }
85
86    async fn reopen(&mut self, dir: &fio::DirectoryProxy) -> Result<()> {
87        let proxy = self.proxy.take().unwrap();
88        proxy
89            .close()
90            .await
91            .context("reopen close fidl error")?
92            .map_err(zx::Status::from_raw)
93            .context("reopen close returned error")?;
94        self.proxy = Some(
95            fuchsia_fs::directory::open_file(
96                dir,
97                &self.name,
98                fio::PERM_READABLE | fio::PERM_WRITABLE,
99            )
100            .await?,
101        );
102        Ok(())
103    }
104
105    fn proxy(&self) -> &fio::FileProxy {
106        self.proxy.as_ref().unwrap()
107    }
108}
109
/// Continuously generates load on `root`.  Does not return.
///
/// Maintains an in-memory model (`files`) of what should be on disk; roughly
/// every 21st iteration a full scan asserts that the directory listing and
/// every file's contents match the model. All randomness derives from `seed`,
/// so a given seed reproduces the same operation sequence.
pub async fn generate_load<S: OpSampler>(
    seed: u64,
    op_distribution: &S,
    root: &fio::DirectoryProxy,
) -> Result<()> {
    let mut rng = StdRng::seed_from_u64(seed);

    // Make a set of 16 possible files to mess with.
    let mut files: Vec<File> = (&mut rng).sample_iter(distributions::Standard).take(16).collect();
    log::debug!("xx: creating initial files");
    for file in &mut files {
        log::debug!("    creating {}", file);
        file.create(root)
            .await
            .with_context(|| format!("creating file {} during setup", file.name))?;
    }

    log::info!("generating load");
    let mut scan_tick = 0;
    loop {
        // Periodic verification pass: compare the on-disk directory and file
        // contents against the in-memory model, then reset the tick.
        if scan_tick >= 20 {
            log::debug!("xx: full scan");
            let mut entries = fuchsia_fs::directory::readdir(root)
                .await?
                .into_iter()
                .map(|entry| entry.name)
                .collect::<Vec<_>>();
            entries.sort();
            let mut expected_entries =
                files.iter().map(|file| file.name.to_string()).collect::<Vec<_>>();
            expected_entries.sort();
            assert_eq!(entries, expected_entries);
            for file in &files {
                log::debug!("    scanning {}", file);
                // Make sure we reset seek, since read and write both move the seek pointer
                let offset = file
                    .proxy()
                    .seek(fio::SeekOrigin::Start, 0)
                    .await
                    .context("scan seek fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("scan seek returned error")?;
                assert_eq!(offset, 0);
                let data = fuchsia_fs::file::read(file.proxy()).await.context("scan read error")?;
                assert_eq!(file.contents.len(), data.len());
                assert_eq!(&file.contents, &data);
            }
            scan_tick = 0;
        } else {
            scan_tick += 1;
        }
        // unwrap: vec is always non-empty so this will never be None.
        let file = files.choose_mut(&mut rng).unwrap();
        match op_distribution.sample(&mut rng) {
            Op::Allocate => {
                // len has to be bigger than zero so make sure there is at least one byte to
                // request.
                let offset = rng.gen_range(0..BLACKOUT_MAX_FILE_SIZE - 1);
                let len = rng.gen_range(1..BLACKOUT_MAX_FILE_SIZE - offset);
                log::debug!(
                    "op: {}, allocate range: {}..{}, len: {}",
                    file,
                    offset,
                    offset + len,
                    len
                );
                file.proxy()
                    .allocate(offset as u64, len as u64, fio::AllocateMode::empty())
                    .await
                    .context("allocate fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("allocate returned error")?;
                // Allocate may extend the file; newly allocated bytes read back
                // as zeros, so mirror that in the model.
                if file.contents.len() < offset + len {
                    file.contents.resize(offset + len, 0);
                }
            }
            Op::Write => {
                // Make sure we are always writing at least one byte.
                let offset = rng.gen_range(0..BLACKOUT_MAX_FILE_SIZE - 1);
                let len = rng.gen_range(1..std::cmp::min(8192, BLACKOUT_MAX_FILE_SIZE - offset));
                log::debug!(
                    "op: {}, write range: {}..{}, len: {}",
                    file,
                    offset,
                    offset + len,
                    len
                );
                let mut data = vec![0u8; len];
                rng.fill(data.as_mut_slice());
                file.proxy()
                    .write_at(&data, offset as u64)
                    .await
                    .context("write fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("write returned error")?;
                // It's possible we are extending the file with this call, so deal with that
                // here by filling it with zeros and then replacing that with the new content,
                // because any space between the new offset and the old end could be sparse
                // zeros.
                if file.contents.len() < offset + len {
                    file.contents.resize(offset + len, 0);
                }
                file.contents[offset..offset + len].copy_from_slice(&data);
            }
            Op::Truncate => {
                // Resize can shrink or extend; extension zero-fills, which
                // `Vec::resize` with 0 mirrors exactly.
                let offset = rng.gen_range(0..BLACKOUT_MAX_FILE_SIZE);
                log::debug!("op: {}, truncate offset: {}", file, offset);
                file.proxy()
                    .resize(offset as u64)
                    .await
                    .context("truncate fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("truncate returned error")?;
                file.contents.resize(offset, 0);
            }
            Op::Reopen => {
                log::debug!("op: {}, sync and reopen", file);
                file.reopen(root).await?;
            }
            Op::Replace => {
                // Close and unlink the file, then generate a brand-new random
                // one in its slot and create it on disk.
                log::debug!("op: {}, replace", file);
                file.proxy()
                    .close()
                    .await
                    .context("replace close fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("replace close returned error")?;
                root.unlink(&file.name, &fio::UnlinkOptions::default())
                    .await
                    .context("replace unlink fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("replace unlink returned error")?;
                *file = rng.gen();
                log::debug!("    {} is replacement", file);
                file.create(root)
                    .await
                    .with_context(|| format!("creating file {} as a replacement", file.name))?;
            }
        }
    }
}