vmo_backed_block_server/vmo_backed_server.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use anyhow::{anyhow, Error};
use block_server::async_interface::{Interface, SessionManager};
use block_server::{BlockInfo, BlockServer, DeviceInfo, WriteOptions};
use fidl::endpoints::ServerEnd;
use fs_management::filesystem::BlockConnector;
use std::borrow::Cow;
use std::num::NonZero;
use std::sync::Arc;
use {fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume};
/// The action an `Observer` can take for a write: perform it, silently discard it, or fail it
/// explicitly (in which case `zx::Status::IO` is returned to the client).
pub enum WriteAction {
    Write,
    Discard,
    Fail,
}

pub trait Observer: Send + Sync {
    fn read(
        &self,
        _device_block_offset: u64,
        _block_count: u32,
        _vmo: &Arc<zx::Vmo>,
        _vmo_offset: u64,
    ) {
    }

    fn write(
        &self,
        _device_block_offset: u64,
        _block_count: u32,
        _vmo: &Arc<zx::Vmo>,
        _vmo_offset: u64,
        _opts: WriteOptions,
    ) -> WriteAction {
        WriteAction::Write
    }

    fn barrier(&self) {}

    fn flush(&self) {}

    fn trim(&self, _device_block_offset: u64, _block_count: u32) {}
}
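
// Illustrative sketch (not part of the public API; `DiscardingObserver` is a hypothetical name):
// an `Observer` that lets reads through but drops every write, which can be used to simulate a
// device that silently loses data.
#[allow(dead_code)]
struct DiscardingObserver;

impl Observer for DiscardingObserver {
    fn write(
        &self,
        _device_block_offset: u64,
        _block_count: u32,
        _vmo: &Arc<zx::Vmo>,
        _vmo_offset: u64,
        _opts: WriteOptions,
    ) -> WriteAction {
        // Report success to the client without persisting anything.
        WriteAction::Discard
    }
}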

/// A local server backed by a VMO.
pub struct VmoBackedServer {
    server: BlockServer<SessionManager<Data>>,
}

/// The initial contents of the VMO.  This also determines the size of the block device.
pub enum InitialContents<'a> {
    /// An empty VMO will be created with capacity for this many *blocks*.
    FromCapacity(u64),
    /// A VMO is created with capacity for this many *blocks* and the buffer's contents copied into
    /// it.
    FromBufferAndCapactity(u64, &'a [u8]),
    /// A VMO is created which is exactly large enough for the initial contents (rounded up to block
    /// size), and the buffer's contents copied into it.
    FromBuffer(&'a [u8]),
    /// The provided VMO is used.  If its size is not block-aligned, the data will be truncated.
    FromVmo(zx::Vmo),
}

pub struct VmoBackedServerOptions<'a> {
    /// NB: `block_count` is ignored as that comes from `initial_contents`.
    pub info: DeviceInfo,
    pub block_size: u32,
    pub initial_contents: InitialContents<'a>,
    pub observer: Option<Box<dyn Observer>>,
}

impl Default for VmoBackedServerOptions<'_> {
    fn default() -> Self {
        VmoBackedServerOptions {
            info: DeviceInfo::Block(BlockInfo {
                device_flags: fblock::Flag::empty(),
                block_count: 0,
                max_transfer_blocks: None,
            }),
            block_size: 512,
            initial_contents: InitialContents::FromCapacity(0),
            observer: None,
        }
    }
}

impl VmoBackedServerOptions<'_> {
    pub fn build(self) -> Result<VmoBackedServer, Error> {
        let (data, block_count) = match self.initial_contents {
            InitialContents::FromCapacity(block_count) => {
                (zx::Vmo::create(block_count * self.block_size as u64)?, block_count)
            }
            InitialContents::FromBufferAndCapactity(block_count, buf) => {
                let needed =
                    buf.len()
                        .checked_next_multiple_of(self.block_size as usize)
                        .ok_or_else(|| anyhow!("Invalid buffer size"))? as u64
                        / self.block_size as u64;
                if needed > block_count {
                    return Err(anyhow!("Not enough capacity: {needed} vs {block_count}"));
                }
                let vmo = zx::Vmo::create(block_count * self.block_size as u64)?;
                vmo.write(buf, 0)?;
                (vmo, block_count)
            }
            InitialContents::FromBuffer(buf) => {
                let block_count =
                    buf.len()
                        .checked_next_multiple_of(self.block_size as usize)
                        .ok_or_else(|| anyhow!("Invalid buffer size"))? as u64
                        / self.block_size as u64;
                let vmo = zx::Vmo::create(block_count * self.block_size as u64)?;
                vmo.write(buf, 0)?;
                (vmo, block_count)
            }
            InitialContents::FromVmo(vmo) => {
                let size = vmo.get_size()?;
                let block_count = size / self.block_size as u64;
                (vmo, block_count)
            }
        };

        let info = match self.info {
            DeviceInfo::Block(mut info) => {
                info.block_count = block_count;
                DeviceInfo::Block(info)
            }
            DeviceInfo::Partition(mut info) => {
                info.block_range = Some(0..block_count);
                DeviceInfo::Partition(info)
            }
        };
        Ok(VmoBackedServer {
            server: BlockServer::new(
                self.block_size,
                Arc::new(Data { info, block_size: self.block_size, data, observer: self.observer }),
            ),
        })
    }
}
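
// Usage sketch (illustrative only; `example_build` is a hypothetical helper, not part of this
// module): build a server from an in-memory buffer, letting the block count be derived from the
// buffer size and the default 512-byte block size.
#[allow(dead_code)]
fn example_build() -> Result<VmoBackedServer, Error> {
    let contents = vec![0u8; 64 * 1024];
    VmoBackedServerOptions {
        initial_contents: InitialContents::FromBuffer(&contents),
        ..Default::default()
    }
    .build()
}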

impl VmoBackedServer {
    /// Handles `requests`.  The future will resolve when the stream terminates.
    pub async fn serve(&self, requests: fvolume::VolumeRequestStream) -> Result<(), Error> {
        self.server.handle_requests(requests).await
    }
}
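
// Serving sketch (illustrative; `example_serve` is a hypothetical helper): create a channel pair
// and serve the server end inline.  The future completes once the client end is closed.
#[allow(dead_code)]
async fn example_serve(server: &VmoBackedServer) -> Result<(), Error> {
    let (_client, server_end) = fidl::endpoints::create_endpoints::<fvolume::VolumeMarker>();
    server.serve(server_end.into_stream()).await
}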

/// Implements `BlockConnector` to vend connections to a VmoBackedServer.
pub struct VmoBackedServerConnector {
    scope: fuchsia_async::Scope,
    server: Arc<VmoBackedServer>,
}

impl VmoBackedServerConnector {
    pub fn new(scope: fuchsia_async::Scope, server: Arc<VmoBackedServer>) -> Self {
        Self { scope, server }
    }
}

impl BlockConnector for VmoBackedServerConnector {
    fn connect_channel_to_volume(
        &self,
        server_end: ServerEnd<fvolume::VolumeMarker>,
    ) -> Result<(), Error> {
        let server = self.server.clone();
        let _ = self.scope.spawn(async move {
            let _ = server.serve(server_end.into_stream()).await;
        });
        Ok(())
    }
}
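
// Connector sketch (illustrative; `example_connect` is a hypothetical helper): vend a Volume
// connection through the connector.  Assumes the caller already owns a `fuchsia_async::Scope` on
// which the connection will be served.
#[allow(dead_code)]
fn example_connect(
    scope: fuchsia_async::Scope,
    server: Arc<VmoBackedServer>,
) -> Result<fvolume::VolumeProxy, Error> {
    let connector = VmoBackedServerConnector::new(scope, server);
    let (client, server_end) = fidl::endpoints::create_endpoints::<fvolume::VolumeMarker>();
    connector.connect_channel_to_volume(server_end)?;
    Ok(client.into_proxy())
}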

/// Extension trait for test-only functionality.  `unwrap` is used liberally in these functions, to
/// simplify their usage in tests.
pub trait VmoBackedServerTestingExt {
    fn new(block_count: u64, block_size: u32, initial_content: &[u8]) -> Self;
    fn from_vmo(block_size: u32, vmo: zx::Vmo) -> Self;
    fn volume_proxy(self: &Arc<Self>) -> fvolume::VolumeProxy;
    fn connect(self: &Arc<Self>, server: ServerEnd<fvolume::VolumeMarker>);
    fn block_proxy(self: &Arc<Self>) -> fblock::BlockProxy;
}

impl VmoBackedServerTestingExt for VmoBackedServer {
    fn new(block_count: u64, block_size: u32, initial_content: &[u8]) -> Self {
        VmoBackedServerOptions {
            block_size,
            initial_contents: InitialContents::FromBufferAndCapactity(block_count, initial_content),
            ..Default::default()
        }
        .build()
        .unwrap()
    }
    fn from_vmo(block_size: u32, vmo: zx::Vmo) -> Self {
        VmoBackedServerOptions {
            block_size,
            initial_contents: InitialContents::FromVmo(vmo),
            ..Default::default()
        }
        .build()
        .unwrap()
    }

    fn volume_proxy(self: &Arc<Self>) -> fvolume::VolumeProxy {
        let (client, server) = fidl::endpoints::create_endpoints();
        self.connect(server);
        client.into_proxy()
    }

    fn connect(self: &Arc<Self>, server: ServerEnd<fvolume::VolumeMarker>) {
        let this = self.clone();
        fuchsia_async::Task::spawn(async move {
            let _ = this.serve(server.into_stream()).await;
        })
        .detach();
    }

    fn block_proxy(self: &Arc<Self>) -> fblock::BlockProxy {
        use fidl::endpoints::Proxy as _;
        fblock::BlockProxy::from_channel(self.volume_proxy().into_channel().unwrap())
    }
}
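
// Test usage sketch (illustrative; `example_test_usage` is a hypothetical helper, and the
// assertion assumes the fuchsia.hardware.block GetInfo response shape): bring up a small
// in-memory device and query it over the Block protocol from a fuchsia_async executor.
#[allow(dead_code)]
async fn example_test_usage() {
    let server = Arc::new(VmoBackedServer::new(16, 512, &[]));
    let block = server.block_proxy();
    let info = block.get_info().await.expect("FIDL error").expect("get_info failed");
    assert_eq!(info.block_size, 512);
}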

struct Data {
    info: DeviceInfo,
    block_size: u32,
    data: zx::Vmo,
    observer: Option<Box<dyn Observer>>,
}

impl Interface for Data {
    async fn get_info(&self) -> Result<Cow<'_, DeviceInfo>, zx::Status> {
        Ok(Cow::Borrowed(&self.info))
    }

    async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo: &Arc<zx::Vmo>,
        vmo_offset: u64,
        _trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        if let Some(observer) = self.observer.as_ref() {
            observer.read(device_block_offset, block_count, vmo, vmo_offset);
        }
        if let Some(max) = self.info.max_transfer_blocks().as_ref() {
            // Requests should be split up by the core library
            assert!(block_count <= max.get());
        }
        if device_block_offset + block_count as u64 > self.info.block_count().unwrap() {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            vmo.write(
                &self.data.read_to_vec(
                    device_block_offset * self.block_size as u64,
                    block_count as u64 * self.block_size as u64,
                )?,
                vmo_offset,
            )
        }
    }

    async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo: &Arc<zx::Vmo>,
        vmo_offset: u64,
        opts: WriteOptions,
        _trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        if let Some(observer) = self.observer.as_ref() {
            match observer.write(device_block_offset, block_count, vmo, vmo_offset, opts) {
                WriteAction::Write => {}
                WriteAction::Discard => return Ok(()),
                WriteAction::Fail => return Err(zx::Status::IO),
            }
        }
        if let Some(max) = self.info.max_transfer_blocks().as_ref() {
            // Requests should be split up by the core library
            assert!(block_count <= max.get());
        }
        if device_block_offset + block_count as u64 > self.info.block_count().unwrap() {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            self.data.write(
                &vmo.read_to_vec(vmo_offset, block_count as u64 * self.block_size as u64)?,
                device_block_offset * self.block_size as u64,
            )
        }
    }

    fn barrier(&self) -> Result<(), zx::Status> {
        if let Some(observer) = self.observer.as_ref() {
            observer.barrier();
        }
        Ok(())
    }

    async fn flush(&self, _trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        if let Some(observer) = self.observer.as_ref() {
            observer.flush();
        }
        Ok(())
    }

    async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        _trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        if let Some(observer) = self.observer.as_ref() {
            observer.trim(device_block_offset, block_count);
        }
        if device_block_offset + block_count as u64 > self.info.block_count().unwrap() {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(())
        }
    }
}