// blackout_target/random_op.rs

1// Copyright 2025 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Load generator for mutable filesystems which runs random operations.
6
7use anyhow::{Context, Result};
8use fidl_fuchsia_io as fio;
9use rand::distr::{Distribution, StandardUniform};
10use rand::rngs::StdRng;
11use rand::seq::IndexedMutRandom;
12use rand::{Rng, SeedableRng};
13use std::sync::atomic::{AtomicU64, Ordering};
14
// Arbitrary maximum file size (64 KiB) just to put a cap on it.
const BLACKOUT_MAX_FILE_SIZE: usize = 1 << 16;
17
/// Operations that can be performed on a file by the load generator.
#[derive(Clone, Copy, Debug)]
pub enum Op {
    /// Extend the file with fallocate.
    Allocate,
    /// Write random data to the file at a random offset.
    Write,
    /// Truncate the file to a random size (may shrink or extend it).
    Truncate,
    /// Close the connection to this file and reopen it. This effectively forces the handle to
    /// flush.
    Reopen,
    /// Delete this file and make a new random one.
    Replace,
}
33
/// A trait to allow clients to specify their own sample rates (or disable certain operations).
pub trait OpSampler {
    /// Picks a random [`Op`] using `rng` as the entropy source.
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Op;
}
39
// In-memory model of one file on disk. `contents` mirrors what the on-disk
// file should hold, so periodic scans can verify the filesystem against it.
struct File {
    // On-disk file name (a decimal counter value).
    name: String,
    // Expected contents of the on-disk file.
    contents: Vec<u8>,
    // Object id reported by the filesystem; `None` until `create` succeeds.
    oid: Option<u64>,
    // Open connection to the file; `None` until `create` succeeds.
    proxy: Option<fio::FileProxy>,
}
46
47impl std::fmt::Display for File {
48    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
49        write!(f, "file '{}' (current len: {}) ", self.name, self.contents.len())?;
50        if let Some(oid) = self.oid {
51            write!(f, "oid: {}", oid)?;
52        }
53        Ok(())
54    }
55}
56
57impl Distribution<File> for StandardUniform {
58    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> File {
59        static NAME: AtomicU64 = AtomicU64::new(0);
60        let size = rng.random_range(1..BLACKOUT_MAX_FILE_SIZE);
61        let mut contents = vec![0; size];
62        rng.fill(contents.as_mut_slice());
63        let name = NAME.fetch_add(1, Ordering::Relaxed);
64        File { name: name.to_string(), contents, oid: None, proxy: None }
65    }
66}
67
68impl File {
69    // Create this file on disk in the given directory.
70    async fn create(&mut self, dir: &fio::DirectoryProxy) -> Result<()> {
71        let file = fuchsia_fs::directory::open_file(
72            dir,
73            &self.name,
74            fio::PERM_READABLE | fio::PERM_WRITABLE | fio::Flags::FLAG_MUST_CREATE,
75        )
76        .await?;
77        fuchsia_fs::file::write(&file, &self.contents).await?;
78        let (_, attrs) = file
79            .get_attributes(fio::NodeAttributesQuery::ID)
80            .await?
81            .map_err(zx::Status::from_raw)?;
82        self.oid = attrs.id;
83        self.proxy = Some(file);
84        Ok(())
85    }
86
87    async fn reopen(&mut self, dir: &fio::DirectoryProxy) -> Result<()> {
88        let proxy = self.proxy.take().unwrap();
89        proxy
90            .close()
91            .await
92            .context("reopen close fidl error")?
93            .map_err(zx::Status::from_raw)
94            .context("reopen close returned error")?;
95        self.proxy = Some(
96            fuchsia_fs::directory::open_file(
97                dir,
98                &self.name,
99                fio::PERM_READABLE | fio::PERM_WRITABLE,
100            )
101            .await?,
102        );
103        Ok(())
104    }
105
106    fn proxy(&self) -> &fio::FileProxy {
107        self.proxy.as_ref().unwrap()
108    }
109}
110
/// Continuously generates load on `root`.  Does not return.
pub async fn generate_load<S: OpSampler>(
    seed: u64,
    op_distribution: &S,
    root: &fio::DirectoryProxy,
) -> Result<()> {
    // Seed the rng explicitly so a failing run can be reproduced from its seed.
    let mut rng = StdRng::seed_from_u64(seed);

    // Make a set of 16 possible files to mess with.
    let mut files: Vec<File> = (&mut rng).sample_iter(StandardUniform).take(16).collect();
    log::debug!("xx: creating initial files");
    for file in &mut files {
        log::debug!("    creating {}", file);
        file.create(root)
            .await
            .with_context(|| format!("creating file {} during setup", file.name))?;
    }

    log::info!("generating load");
    // Counts operations since the last verification pass; every 20 ops the
    // directory listing and all file contents are checked against the
    // in-memory model.
    let mut scan_tick = 0;
    loop {
        if scan_tick >= 20 {
            log::debug!("xx: full scan");
            // The sorted directory listing must match the model's file names
            // exactly — no extra or missing entries.
            let mut entries = fuchsia_fs::directory::readdir(root)
                .await?
                .into_iter()
                .map(|entry| entry.name)
                .collect::<Vec<_>>();
            entries.sort();
            let mut expected_entries =
                files.iter().map(|file| file.name.to_string()).collect::<Vec<_>>();
            expected_entries.sort();
            assert_eq!(entries, expected_entries);
            for file in &files {
                log::debug!("    scanning {}", file);
                // Make sure we reset seek, since read and write both move the seek pointer
                let offset = file
                    .proxy()
                    .seek(fio::SeekOrigin::Start, 0)
                    .await
                    .context("scan seek fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("scan seek returned error")?;
                assert_eq!(offset, 0);
                let data = fuchsia_fs::file::read(file.proxy()).await.context("scan read error")?;
                assert_eq!(file.contents.len(), data.len());
                assert_eq!(&file.contents, &data);
            }
            scan_tick = 0;
        } else {
            scan_tick += 1;
        }
        // unwrap: vec is always non-empty so this will never be None.
        let file = files.choose_mut(&mut rng).unwrap();
        match op_distribution.sample(&mut rng) {
            Op::Allocate => {
                // len has to be bigger than zero so make sure there is at least one byte to
                // request.
                let offset = rng.random_range(0..BLACKOUT_MAX_FILE_SIZE - 1);
                let len = rng.random_range(1..BLACKOUT_MAX_FILE_SIZE - offset);
                log::debug!(
                    "op: {}, allocate range: {}..{}, len: {}",
                    file,
                    offset,
                    offset + len,
                    len
                );
                file.proxy()
                    .allocate(offset as u64, len as u64, fio::AllocateMode::empty())
                    .await
                    .context("allocate fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("allocate returned error")?;
                // If the allocation extended the file, the model treats the new
                // region as zeros.
                if file.contents.len() < offset + len {
                    file.contents.resize(offset + len, 0);
                }
            }
            Op::Write => {
                // Make sure we are always writing at least one byte.
                let offset = rng.random_range(0..BLACKOUT_MAX_FILE_SIZE - 1);
                let len = rng.random_range(1..std::cmp::min(8192, BLACKOUT_MAX_FILE_SIZE - offset));
                log::debug!(
                    "op: {}, write range: {}..{}, len: {}",
                    file,
                    offset,
                    offset + len,
                    len
                );
                let mut data = vec![0u8; len];
                rng.fill(data.as_mut_slice());
                file.proxy()
                    .write_at(&data, offset as u64)
                    .await
                    .context("write fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("write returned error")?;
                // It's possible we are extending the file with this call, so deal with that
                // here by filling it with zeros and then replacing that with the new content,
                // because any space between the new offset and the old end could be sparse
                // zeros.
                if file.contents.len() < offset + len {
                    file.contents.resize(offset + len, 0);
                }
                file.contents[offset..offset + len].copy_from_slice(&data);
            }
            Op::Truncate => {
                let offset = rng.random_range(0..BLACKOUT_MAX_FILE_SIZE);
                log::debug!("op: {}, truncate offset: {}", file, offset);
                file.proxy()
                    .resize(offset as u64)
                    .await
                    .context("truncate fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("truncate returned error")?;
                // Mirror the resize in the model; an extending truncate fills
                // with zeros.
                file.contents.resize(offset, 0);
            }
            Op::Reopen => {
                log::debug!("op: {}, sync and reopen", file);
                file.reopen(root).await?;
            }
            Op::Replace => {
                log::debug!("op: {}, replace", file);
                // Close the connection before unlinking the entry.
                file.proxy()
                    .close()
                    .await
                    .context("replace close fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("replace close returned error")?;
                root.unlink(&file.name, &fio::UnlinkOptions::default())
                    .await
                    .context("replace unlink fidl error")?
                    .map_err(zx::Status::from_raw)
                    .context("replace unlink returned error")?;
                // Generate a fresh random file in this slot and create it on disk.
                *file = rng.random();
                log::debug!("    {} is replacement", file);
                file.create(root)
                    .await
                    .with_context(|| format!("creating file {} as a replacement", file.name))?;
            }
        }
    }
}