storage_benchmarks/
io_benchmarks.rs

1// Copyright 2022 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::{Benchmark, CacheClearableFilesystem, Filesystem, OperationDuration, OperationTimer};
6use async_trait::async_trait;
7use rand::seq::SliceRandom;
8use rand::{Rng, SeedableRng};
9use rand_xorshift::XorShiftRng;
10use std::fs::OpenOptions;
11use std::io::{Seek, SeekFrom, Write};
12use std::os::unix::fs::FileExt;
13use std::os::unix::io::AsRawFd;
14
/// Seed for the pseudo-random number generators used by the random-access benchmarks, fixed so
/// that runs are reproducible.
const RNG_SEED: u64 = 0xda782a0c3ce1819a;
/// How many blocks to skip after each used block in sparse read benchmarks. This is set to thwart
/// the effects of readahead on these benchmarks. As high as we can go while staying under the file
/// size limit of 4GiB in minfs.
const BLOCK_SKIP: usize = 255;
20
21/// A benchmark that measures how long `read` calls take to a file that should not already be cached
22/// in memory.
23#[derive(Clone)]
24pub struct ReadSequentialCold {
25    op_size: usize,
26    op_count: usize,
27}
28
29impl ReadSequentialCold {
30    pub fn new(op_size: usize, op_count: usize) -> Self {
31        Self { op_size, op_count }
32    }
33}
34
35#[async_trait]
36impl<T: CacheClearableFilesystem> Benchmark<T> for ReadSequentialCold {
37    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
38        storage_trace::duration!(
39            c"benchmark",
40            c"ReadSequentialCold",
41            "op_size" => self.op_size,
42            "op_count" => self.op_count
43        );
44        let file_path = fs.benchmark_dir().join("file");
45
46        // Setup
47        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
48        write_file(&mut file, self.op_size, self.op_count);
49        std::mem::drop(file);
50        fs.clear_cache().await;
51
52        // Benchmark
53        let mut file = OpenOptions::new().read(true).open(&file_path).unwrap();
54        read_sequential(&mut file, self.op_size, self.op_count)
55    }
56
57    fn name(&self) -> String {
58        format!("ReadSequentialCold/{}", self.op_size)
59    }
60}
61
62/// A benchmark that measures how long `read` calls take to a file that should already be cached in
63/// memory.
64#[derive(Clone)]
65pub struct ReadSequentialWarm {
66    op_size: usize,
67    op_count: usize,
68}
69
70impl ReadSequentialWarm {
71    pub fn new(op_size: usize, op_count: usize) -> Self {
72        Self { op_size, op_count }
73    }
74}
75
76#[async_trait]
77impl<T: Filesystem> Benchmark<T> for ReadSequentialWarm {
78    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
79        storage_trace::duration!(
80            c"benchmark",
81            c"ReadSequentialWarm",
82            "op_size" => self.op_size,
83            "op_count" => self.op_count
84        );
85        let file_path = fs.benchmark_dir().join("file");
86
87        // Setup
88        let mut file =
89            OpenOptions::new().write(true).read(true).create_new(true).open(&file_path).unwrap();
90        write_file(&mut file, self.op_size, self.op_count);
91        file.seek(SeekFrom::Start(0)).unwrap();
92
93        // Benchmark
94        read_sequential(&mut file, self.op_size, self.op_count)
95    }
96
97    fn name(&self) -> String {
98        format!("ReadSequentialWarm/{}", self.op_size)
99    }
100}
101
102/// A benchmark that measures how long random `pread` calls take to a file that should not already
103/// be cached in memory.
104#[derive(Clone)]
105pub struct ReadRandomCold {
106    op_size: usize,
107    op_count: usize,
108}
109
110impl ReadRandomCold {
111    pub fn new(op_size: usize, op_count: usize) -> Self {
112        Self { op_size, op_count }
113    }
114}
115
116#[async_trait]
117impl<T: CacheClearableFilesystem> Benchmark<T> for ReadRandomCold {
118    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
119        storage_trace::duration!(
120            c"benchmark",
121            c"ReadRandomCold",
122            "op_size" => self.op_size,
123            "op_count" => self.op_count
124        );
125        let file_path = fs.benchmark_dir().join("file");
126
127        // Setup
128        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
129        write_file(&mut file, self.op_size, self.op_count);
130        std::mem::drop(file);
131        fs.clear_cache().await;
132
133        // Benchmark
134        let mut rng = XorShiftRng::seed_from_u64(RNG_SEED);
135        let mut file = OpenOptions::new().read(true).open(&file_path).unwrap();
136        read_random(&mut file, self.op_size, self.op_count, &mut rng)
137    }
138
139    fn name(&self) -> String {
140        format!("ReadRandomCold/{}", self.op_size)
141    }
142}
143
144/// A benchmark that measures how long `pread` calls take to a sparse file that should not already
145/// be cached in memory. The file's sparseness is designed to defeat the gains of readahead without
146/// making an extremely large file, but will also result in testing access to many small extents.
147#[derive(Clone)]
148pub struct ReadSparseCold {
149    op_size: usize,
150    op_count: usize,
151}
152
153impl ReadSparseCold {
154    pub fn new(op_size: usize, op_count: usize) -> Self {
155        Self { op_size, op_count }
156    }
157}
158
159#[async_trait]
160impl<T: CacheClearableFilesystem> Benchmark<T> for ReadSparseCold {
161    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
162        storage_trace::duration!(
163            c"benchmark",
164            c"ReadSparseCold",
165            "op_size" => self.op_size,
166            "op_count" => self.op_count
167        );
168        let file_path = fs.benchmark_dir().join("file");
169
170        // Setup
171        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
172        write_sparse_file(&mut file, self.op_size, self.op_count, BLOCK_SKIP);
173        std::mem::drop(file);
174        fs.clear_cache().await;
175
176        // Benchmark
177        let mut file = OpenOptions::new().read(true).open(&file_path).unwrap();
178        read_sparse(&mut file, self.op_size, self.op_count, BLOCK_SKIP)
179    }
180
181    fn name(&self) -> String {
182        format!("ReadSparseCold/{}", self.op_size)
183    }
184}
185
186/// A benchmark that measures how long random `pread` calls take to a file that should already be
187/// cached in memory.
188#[derive(Clone)]
189pub struct ReadRandomWarm {
190    op_size: usize,
191    op_count: usize,
192}
193
194impl ReadRandomWarm {
195    pub fn new(op_size: usize, op_count: usize) -> Self {
196        Self { op_size, op_count }
197    }
198}
199
200#[async_trait]
201impl<T: Filesystem> Benchmark<T> for ReadRandomWarm {
202    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
203        storage_trace::duration!(
204            c"benchmark",
205            c"ReadRandomWarm",
206            "op_size" => self.op_size,
207            "op_count" => self.op_count
208        );
209        let file_path = fs.benchmark_dir().join("file");
210
211        // Setup
212        let mut file =
213            OpenOptions::new().write(true).read(true).create_new(true).open(&file_path).unwrap();
214        write_file(&mut file, self.op_size, self.op_count);
215
216        // Benchmark
217        let mut rng = XorShiftRng::seed_from_u64(RNG_SEED);
218        read_random(&mut file, self.op_size, self.op_count, &mut rng)
219    }
220
221    fn name(&self) -> String {
222        format!("ReadRandomWarm/{}", self.op_size)
223    }
224}
225
226/// A benchmark that measures how long `write` calls take to a new file.
227#[derive(Clone)]
228pub struct WriteSequentialCold {
229    op_size: usize,
230    op_count: usize,
231}
232
233impl WriteSequentialCold {
234    pub fn new(op_size: usize, op_count: usize) -> Self {
235        Self { op_size, op_count }
236    }
237}
238
239#[async_trait]
240impl<T: Filesystem> Benchmark<T> for WriteSequentialCold {
241    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
242        storage_trace::duration!(
243            c"benchmark",
244            c"WriteSequentialCold",
245            "op_size" => self.op_size,
246            "op_count" => self.op_count
247        );
248        let file_path = fs.benchmark_dir().join("file");
249        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
250        write_sequential(&mut file, self.op_size, self.op_count)
251    }
252
253    fn name(&self) -> String {
254        format!("WriteSequentialCold/{}", self.op_size)
255    }
256}
257
258/// A benchmark that measures how long `write` calls take when overwriting a file that should
259/// already be in memory.
260#[derive(Clone)]
261pub struct WriteSequentialWarm {
262    op_size: usize,
263    op_count: usize,
264}
265
266impl WriteSequentialWarm {
267    pub fn new(op_size: usize, op_count: usize) -> Self {
268        Self { op_size, op_count }
269    }
270}
271
272#[async_trait]
273impl<T: Filesystem> Benchmark<T> for WriteSequentialWarm {
274    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
275        storage_trace::duration!(
276            c"benchmark",
277            c"WriteSequentialWarm",
278            "op_size" => self.op_size,
279            "op_count" => self.op_count
280        );
281        let file_path = fs.benchmark_dir().join("file");
282
283        // Setup
284        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
285        write_file(&mut file, self.op_size, self.op_count);
286        file.seek(SeekFrom::Start(0)).unwrap();
287
288        // Benchmark
289        write_sequential(&mut file, self.op_size, self.op_count)
290    }
291
292    fn name(&self) -> String {
293        format!("WriteSequentialWarm/{}", self.op_size)
294    }
295}
296
297/// A benchmark that measures how long random `pwrite` calls take to a new file.
298#[derive(Clone)]
299pub struct WriteRandomCold {
300    op_size: usize,
301    op_count: usize,
302}
303
304impl WriteRandomCold {
305    pub fn new(op_size: usize, op_count: usize) -> Self {
306        Self { op_size, op_count }
307    }
308}
309
310#[async_trait]
311impl<T: Filesystem> Benchmark<T> for WriteRandomCold {
312    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
313        storage_trace::duration!(
314            c"benchmark",
315            c"WriteRandomCold",
316            "op_size" => self.op_size,
317            "op_count" => self.op_count
318        );
319        let file_path = fs.benchmark_dir().join("file");
320        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
321        let mut rng = XorShiftRng::seed_from_u64(RNG_SEED);
322        write_random(&mut file, self.op_size, self.op_count, &mut rng)
323    }
324
325    fn name(&self) -> String {
326        format!("WriteRandomCold/{}", self.op_size)
327    }
328}
329
330/// A benchmark that measures how long random `pwrite` calls take when overwriting a file that
331/// should already be in memory.
332#[derive(Clone)]
333pub struct WriteRandomWarm {
334    op_size: usize,
335    op_count: usize,
336}
337
338impl WriteRandomWarm {
339    pub fn new(op_size: usize, op_count: usize) -> Self {
340        Self { op_size, op_count }
341    }
342}
343
344#[async_trait]
345impl<T: Filesystem> Benchmark<T> for WriteRandomWarm {
346    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
347        storage_trace::duration!(
348            c"benchmark",
349            c"WriteRandomWarm",
350            "op_size" => self.op_size,
351            "op_count" => self.op_count
352        );
353        let file_path = fs.benchmark_dir().join("file");
354
355        // Setup
356        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
357        write_sequential(&mut file, self.op_size, self.op_count);
358
359        // Benchmark
360        let mut rng = XorShiftRng::seed_from_u64(RNG_SEED);
361        write_random(&mut file, self.op_size, self.op_count, &mut rng)
362    }
363
364    fn name(&self) -> String {
365        format!("WriteRandomWarm/{}", self.op_size)
366    }
367}
368
369/// A benchmark that measures how long 'write` and `fsync` calls take to a new file.
370#[derive(Clone)]
371pub struct WriteSequentialFsyncCold {
372    op_size: usize,
373    op_count: usize,
374}
375
376impl WriteSequentialFsyncCold {
377    pub fn new(op_size: usize, op_count: usize) -> Self {
378        Self { op_size, op_count }
379    }
380}
381
382#[async_trait]
383impl<T: Filesystem> Benchmark<T> for WriteSequentialFsyncCold {
384    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
385        storage_trace::duration!(
386            c"benchmark",
387            c"WriteSequentialFsyncCold",
388            "op_size" => self.op_size,
389            "op_count" => self.op_count
390        );
391        let file_path = fs.benchmark_dir().join("file");
392        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
393        write_sequential_fsync(&mut file, self.op_size, self.op_count)
394    }
395
396    fn name(&self) -> String {
397        format!("WriteSequentialFsyncCold/{}", self.op_size)
398    }
399}
400
401/// A benchmark that measures how long `write` and `fsync` calls take when overwriting a
402/// file that should already be in memory.
403#[derive(Clone)]
404pub struct WriteSequentialFsyncWarm {
405    op_size: usize,
406    op_count: usize,
407}
408
409impl WriteSequentialFsyncWarm {
410    pub fn new(op_size: usize, op_count: usize) -> Self {
411        Self { op_size, op_count }
412    }
413}
414
415#[async_trait]
416impl<T: Filesystem> Benchmark<T> for WriteSequentialFsyncWarm {
417    async fn run(&self, fs: &mut T) -> Vec<OperationDuration> {
418        storage_trace::duration!(
419            c"benchmark",
420            c"WriteSequentialFsyncWarm",
421            "op_size" => self.op_size,
422            "op_count" => self.op_count
423        );
424        let file_path = fs.benchmark_dir().join("file");
425
426        // Setup
427        let mut file = OpenOptions::new().write(true).create_new(true).open(&file_path).unwrap();
428        write_sequential(&mut file, self.op_size, self.op_count);
429        // To measure the exact performance of fsync, the previous written data must be synchronized.
430        assert_eq!(unsafe { libc::fsync(file.as_raw_fd()) }, 0);
431        file.seek(SeekFrom::Start(0)).unwrap();
432
433        // Benchmark
434        write_sequential_fsync(&mut file, self.op_size, self.op_count)
435    }
436
437    fn name(&self) -> String {
438        format!("WriteSequentialFsyncWarm/{}", self.op_size)
439    }
440}
441
/// Writes `op_count` chunks of `op_size` bytes each to `file`, back to back (no holes).
fn write_file<F: Write + FileExt>(file: &mut F, op_size: usize, op_count: usize) {
    write_sparse_file(file, op_size, op_count, 0);
}
445
/// Writes `op_count` chunks of `op_size` bytes each to `file`, leaving a hole of
/// `block_skip * op_size` bytes between the end of one chunk and the start of the next
/// (no hole when `block_skip` is 0).
fn write_sparse_file<F: Write + FileExt>(
    file: &mut F,
    op_size: usize,
    op_count: usize,
    block_skip: usize,
) {
    let chunk = vec![0xAB; op_size];
    // Distance between the starts of consecutive chunks.
    let stride = (op_size * (block_skip + 1)) as u64;
    for i in 0..op_count {
        let bytes_written = file.write_at(&chunk, stride * i as u64).unwrap();
        assert_eq!(bytes_written, op_size);
    }
}
459
460/// Makes `op_count` `read` calls to `file`, each for `op_size` bytes.
461fn read_sequential<F: AsRawFd>(
462    file: &mut F,
463    op_size: usize,
464    op_count: usize,
465) -> Vec<OperationDuration> {
466    let mut data = vec![0; op_size];
467    let mut durations = Vec::new();
468    let fd = file.as_raw_fd();
469    for i in 0..op_count {
470        storage_trace::duration!(c"benchmark", c"read", "op_number" => i);
471        let timer = OperationTimer::start();
472        let result = unsafe { libc::read(fd, data.as_mut_ptr() as *mut libc::c_void, data.len()) };
473        durations.push(timer.stop());
474        assert_eq!(result, op_size as isize);
475    }
476    durations
477}
478
479/// Makes `op_count` `pread` calls to `file`, each for `op_size` bytes `block_skip` * `op_size`
480/// bytes apart.
481fn read_sparse<F: AsRawFd>(
482    file: &mut F,
483    op_size: usize,
484    op_count: usize,
485    block_skip: usize,
486) -> Vec<OperationDuration> {
487    let mut data = vec![0; op_size];
488    let mut durations = Vec::new();
489    let fd = file.as_raw_fd();
490    let sparse_offset = ((1 + block_skip) * op_size) as i64;
491    for i in 0..op_count as i64 {
492        storage_trace::duration!(c"benchmark", c"pread", "op_number" => i);
493        let timer = OperationTimer::start();
494        let result = unsafe {
495            libc::pread(fd, data.as_mut_ptr() as *mut libc::c_void, data.len(), i * sparse_offset)
496        };
497        durations.push(timer.stop());
498        assert_eq!(result, op_size as isize);
499    }
500    durations
501}
502
503/// Makes `op_count` `write` calls to `file`, each containing `op_size` bytes.
504fn write_sequential<F: AsRawFd>(
505    file: &mut F,
506    op_size: usize,
507    op_count: usize,
508) -> Vec<OperationDuration> {
509    let data = vec![0xAB; op_size];
510    let mut durations = Vec::new();
511    let fd = file.as_raw_fd();
512    for i in 0..op_count {
513        storage_trace::duration!(c"benchmark", c"write", "op_number" => i);
514        let timer = OperationTimer::start();
515        let result = unsafe { libc::write(fd, data.as_ptr() as *const libc::c_void, data.len()) };
516        durations.push(timer.stop());
517        assert_eq!(result, op_size as isize);
518    }
519    durations
520}
521
522/// Makes `op_count` `write` calls to `file`, each containing `op_size` bytes.
523/// After write, call `fsync` and sync with disk(non-volatile medium).
524fn write_sequential_fsync<F: AsRawFd>(
525    file: &mut F,
526    op_size: usize,
527    op_count: usize,
528) -> Vec<OperationDuration> {
529    let data = vec![0xAB; op_size];
530    let mut durations = Vec::new();
531    let fd = file.as_raw_fd();
532    for i in 0..op_count {
533        storage_trace::duration!(c"benchmark", c"write", "op_number" => i);
534        let timer = OperationTimer::start();
535        let write_result =
536            unsafe { libc::write(fd, data.as_ptr() as *const libc::c_void, data.len()) };
537        let fsync_result = unsafe { libc::fsync(fd) };
538        durations.push(timer.stop());
539        assert_eq!(write_result, op_size as isize);
540        assert_eq!(fsync_result, 0);
541    }
542    durations
543}
544
545fn create_random_offsets<R: Rng>(op_size: usize, op_count: usize, rng: &mut R) -> Vec<libc::off_t> {
546    let op_count = op_count as libc::off_t;
547    let op_size = op_size as libc::off_t;
548    let mut offsets: Vec<libc::off_t> = (0..op_count).map(|offset| offset * op_size).collect();
549    offsets.shuffle(rng);
550    offsets
551}
552
553/// Reads the first `op_size * op_count` bytes in `file` by making `op_count` `pread` calls, each
554/// `pread` call reads `op_size` bytes. The offset order for the `pread` calls is randomized using
555/// `rng`.
556fn read_random<F: AsRawFd, R: Rng>(
557    file: &mut F,
558    op_size: usize,
559    op_count: usize,
560    rng: &mut R,
561) -> Vec<OperationDuration> {
562    let offsets = create_random_offsets(op_size, op_count, rng);
563    let mut data = vec![0xAB; op_size];
564    let mut durations = Vec::new();
565    let fd = file.as_raw_fd();
566    for (i, offset) in offsets.iter().enumerate() {
567        storage_trace::duration!(c"benchmark", c"pread", "op_number" => i, "offset" => *offset);
568        let timer = OperationTimer::start();
569        let result =
570            unsafe { libc::pread(fd, data.as_mut_ptr() as *mut libc::c_void, data.len(), *offset) };
571        durations.push(timer.stop());
572        assert_eq!(result, op_size as isize);
573    }
574    durations
575}
576
577/// Overwrites the first `op_size * op_count` bytes in `file` by making `op_count` `pwrite` calls,
578/// each `pwrite` call writes `op_size` bytes. The offset order for the `pwrite` calls is
579/// randomized using `rng`.
580fn write_random<F: AsRawFd, R: Rng>(
581    file: &mut F,
582    op_size: usize,
583    op_count: usize,
584    rng: &mut R,
585) -> Vec<OperationDuration> {
586    let offsets = create_random_offsets(op_size, op_count, rng);
587    let data = vec![0xAB; op_size];
588    let mut durations = Vec::new();
589    let fd = file.as_raw_fd();
590    for (i, offset) in offsets.iter().enumerate() {
591        storage_trace::duration!(c"benchmark", c"pwrite", "op_number" => i, "offset" => *offset);
592        let timer = OperationTimer::start();
593        let result =
594            unsafe { libc::pwrite(fd, data.as_ptr() as *const libc::c_void, data.len(), *offset) };
595        durations.push(timer.stop());
596        assert_eq!(result, op_size as isize);
597    }
598    durations
599}
600
#[cfg(test)]
mod tests {
    use super::*;
    use crate::testing::TestFilesystem;

    const OP_SIZE: usize = 8;
    const OP_COUNT: usize = 2;

    /// Runs `benchmark` against a fresh `TestFilesystem`, asserting that it produced one duration
    /// per operation and cleared the cache exactly `clear_cache_count` times.
    async fn check_benchmark<T>(benchmark: T, op_count: usize, clear_cache_count: u64)
    where
        T: Benchmark<TestFilesystem>,
    {
        let mut test_fs = Box::new(TestFilesystem::new());
        let results = benchmark.run(test_fs.as_mut()).await;
        assert_eq!(results.len(), op_count);
        assert_eq!(test_fs.clear_cache_count().await, clear_cache_count);
        test_fs.shutdown().await;
    }

    #[fuchsia::test]
    async fn read_sequential_cold_test() {
        let benchmark = ReadSequentialCold::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 1).await;
    }

    #[fuchsia::test]
    async fn read_sequential_warm_test() {
        let benchmark = ReadSequentialWarm::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }

    #[fuchsia::test]
    async fn read_random_cold_test() {
        let benchmark = ReadRandomCold::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 1).await;
    }

    #[fuchsia::test]
    async fn read_sparse_cold_test() {
        let benchmark = ReadSparseCold::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 1).await;
    }

    #[fuchsia::test]
    async fn read_random_warm_test() {
        let benchmark = ReadRandomWarm::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }

    #[fuchsia::test]
    async fn write_sequential_cold_test() {
        let benchmark = WriteSequentialCold::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }

    #[fuchsia::test]
    async fn write_sequential_warm_test() {
        let benchmark = WriteSequentialWarm::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }

    #[fuchsia::test]
    async fn write_random_cold_test() {
        let benchmark = WriteRandomCold::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }

    #[fuchsia::test]
    async fn write_random_warm_test() {
        let benchmark = WriteRandomWarm::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }

    #[fuchsia::test]
    async fn write_sequential_fsync_cold_test() {
        let benchmark = WriteSequentialFsyncCold::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }

    #[fuchsia::test]
    async fn write_sequential_fsync_warm_test() {
        let benchmark = WriteSequentialFsyncWarm::new(OP_SIZE, OP_COUNT);
        check_benchmark(benchmark, OP_COUNT, /*clear_cache_count=*/ 0).await;
    }
}