storage_device/
block_device.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::buffer::{BufferFuture, BufferRef, MutableBufferRef};
6use crate::buffer_allocator::{BufferAllocator, BufferSource};
7use crate::Device;
8use anyhow::{bail, ensure, Error};
9use async_trait::async_trait;
10use block_client::{BlockClient, BlockFlags, BufferSlice, MutableBufferSlice, VmoId, WriteOptions};
11use std::ops::Range;
12use zx::Status;
13
/// BlockDevice is an implementation of Device backed by a real block device behind a FIFO.
pub struct BlockDevice {
    // Hands out transfer buffers carved from the VMO registered with the block server.
    allocator: BufferAllocator,
    // Connection to the underlying block server.
    remote: Box<dyn BlockClient>,
    // When true, `write_with_opts` and `trim` fail with ACCESS_DENIED.
    read_only: bool,
    // Id of the transfer VMO as attached to `remote`; invalidated on close/drop.
    vmoid: VmoId,
}
21
// Size of the transfer VMO registered with the block server (128 MiB), which backs all
// buffers handed out by the allocator.
const TRANSFER_VMO_SIZE: usize = 128 * 1024 * 1024;
23
24impl BlockDevice {
25    /// Creates a new BlockDevice over |remote|.
26    pub async fn new(remote: Box<dyn BlockClient>, read_only: bool) -> Result<Self, Error> {
27        let buffer_source = BufferSource::new(TRANSFER_VMO_SIZE);
28        let vmoid = remote.attach_vmo(buffer_source.vmo()).await?;
29        let allocator = BufferAllocator::new(remote.block_size() as usize, buffer_source);
30        Ok(Self { allocator, remote, read_only, vmoid })
31    }
32}
33
34#[async_trait]
35impl Device for BlockDevice {
36    fn allocate_buffer(&self, size: usize) -> BufferFuture<'_> {
37        self.allocator.allocate_buffer(size)
38    }
39
40    fn block_size(&self) -> u32 {
41        self.remote.block_size()
42    }
43
44    fn block_count(&self) -> u64 {
45        self.remote.block_count()
46    }
47
48    async fn read(&self, offset: u64, buffer: MutableBufferRef<'_>) -> Result<(), Error> {
49        if buffer.len() == 0 {
50            return Ok(());
51        }
52        ensure!(self.vmoid.is_valid(), Status::INVALID_ARGS);
53        ensure!(offset % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
54        ensure!(buffer.range().start % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
55        ensure!(buffer.range().end % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
56        Ok(self
57            .remote
58            .read_at(
59                MutableBufferSlice::new_with_vmo_id(
60                    &self.vmoid,
61                    buffer.range().start as u64,
62                    buffer.len() as u64,
63                ),
64                offset,
65            )
66            .await?)
67    }
68
69    async fn write_with_opts(
70        &self,
71        offset: u64,
72        buffer: BufferRef<'_>,
73        opts: WriteOptions,
74    ) -> Result<(), Error> {
75        if self.read_only {
76            bail!(Status::ACCESS_DENIED);
77        }
78        if buffer.len() == 0 {
79            return Ok(());
80        }
81        ensure!(self.vmoid.is_valid(), "Device is closed");
82        ensure!(offset % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
83        ensure!(buffer.range().start % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
84        ensure!(buffer.range().end % (self.block_size() as usize) == 0, Status::INVALID_ARGS);
85        Ok(self
86            .remote
87            .write_at_with_opts(
88                BufferSlice::new_with_vmo_id(
89                    &self.vmoid,
90                    buffer.range().start as u64,
91                    buffer.len() as u64,
92                ),
93                offset,
94                opts,
95            )
96            .await?)
97    }
98
99    async fn trim(&self, range: Range<u64>) -> Result<(), Error> {
100        if self.read_only {
101            bail!(Status::ACCESS_DENIED);
102        }
103        ensure!(range.start % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
104        ensure!(range.end % (self.block_size() as u64) == 0, Status::INVALID_ARGS);
105        Ok(self.remote.trim(range).await?)
106    }
107
108    async fn close(&self) -> Result<(), Error> {
109        // We can leak the VMO id because we are closing the device.
110        let _ = self.vmoid.take().into_id();
111        Ok(self.remote.close().await?)
112    }
113
114    async fn flush(&self) -> Result<(), Error> {
115        Ok(self.remote.flush().await?)
116    }
117
118    fn barrier(&self) {
119        self.remote.barrier()
120    }
121
122    fn is_read_only(&self) -> bool {
123        self.read_only
124    }
125
126    fn supports_trim(&self) -> bool {
127        self.remote.block_flags().contains(BlockFlags::TRIM_SUPPORT)
128    }
129}
130
impl Drop for BlockDevice {
    fn drop(&mut self) {
        // We can't detach the VmoId because we're not async here, but we are tearing down the
        // connection to the block device so we don't really need to.
        // Taking the id out (mirroring `close`) deliberately leaks it so the VmoId is no
        // longer considered attached at drop time.
        let _ = self.vmoid.take().into_id();
    }
}
138
#[cfg(test)]
mod tests {
    use crate::block_device::BlockDevice;
    use crate::Device;
    use fake_block_client::FakeBlockClient;
    use zx::Status;

    #[fuchsia::test]
    async fn test_lifecycle() {
        let device = BlockDevice::new(Box::new(FakeBlockClient::new(1024, 1024)), false)
            .await
            .expect("new failed");

        {
            let _buf = device.allocate_buffer(8192).await;
        }

        device.close().await.expect("Close failed");
    }

    #[fuchsia::test]
    async fn test_read_write_buffer() {
        let device = BlockDevice::new(Box::new(FakeBlockClient::new(1024, 1024)), false)
            .await
            .expect("new failed");

        {
            let mut buf1 = device.allocate_buffer(8192).await;
            let mut buf2 = device.allocate_buffer(1024).await;
            buf1.as_mut_slice().fill(0xaau8);
            buf2.as_mut_slice().fill(0xbbu8);
            device.write(65536, buf1.as_ref()).await.expect("Write failed");
            device.write(65536 + 8192, buf2.as_ref()).await.expect("Write failed");
        }
        {
            let mut buf = device.allocate_buffer(8192 + 1024).await;
            device.read(65536, buf.as_mut()).await.expect("Read failed");
            assert_eq!(buf.as_slice()[..8192], vec![0xaau8; 8192]);
            assert_eq!(buf.as_slice()[8192..], vec![0xbbu8; 1024]);
        }

        device.close().await.expect("Close failed");
    }

    #[fuchsia::test]
    async fn test_read_only() {
        let device = BlockDevice::new(Box::new(FakeBlockClient::new(1024, 1024)), true)
            .await
            .expect("new failed");
        let mut buf1 = device.allocate_buffer(8192).await;
        buf1.as_mut_slice().fill(0xaau8);
        let err = device.write(65536, buf1.as_ref()).await.expect_err("Write succeeded");
        assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::ACCESS_DENIED);
    }

    #[fuchsia::test]
    async fn test_unaligned_access() {
        let device = BlockDevice::new(Box::new(FakeBlockClient::new(1024, 1024)), false)
            .await
            .expect("new failed");
        let mut buf1 = device.allocate_buffer(device.block_size() as usize * 2).await;
        buf1.as_mut_slice().fill(0xaau8);

        // Write checks
        {
            let err = device.write(1, buf1.as_ref()).await.expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(1..(device.block_size() as usize + 1)))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(1..device.block_size() as usize))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .write(0, buf1.subslice(0..(device.block_size() as usize + 1)))
                .await
                .expect_err("Write succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }

        // Read checks
        {
            let err = device.read(1, buf1.as_mut()).await.expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(1..(device.block_size() as usize + 1)))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(1..device.block_size() as usize))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err = device
                .read(0, buf1.subslice_mut(0..(device.block_size() as usize + 1)))
                .await
                .expect_err("Read succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }

        // Trim checks.  (The expect_err messages previously said "Read succeeded", a
        // copy-paste slip that would mislabel a failure.)
        {
            let err =
                device.trim(1..device.block_size() as u64).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err =
                device.trim(1..(device.block_size() as u64 + 1)).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
        {
            let err =
                device.trim(0..(device.block_size() as u64 + 1)).await.expect_err("Trim succeeded");
            assert_eq!(err.root_cause().downcast_ref::<Status>().unwrap(), &Status::INVALID_ARGS);
        }
    }
}