// gpt_component/partition.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::gpt::GptPartition;
use anyhow::Error;
use block_client::{ReadOptions, VmoId, WriteOptions};
use block_server::async_interface::{PassthroughSession, SessionManager};
use block_server::{DeviceInfo, OffsetMap};
use fidl_fuchsia_storage_block as fblock;
use fuchsia_async as fasync;

use fuchsia_sync::Mutex;
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::sync::{Arc, Weak};
17
/// A wrapper around a VmoId which keeps it active until all requests which use the Vmoid are
/// complete.  Strong references are held by ongoing requests.
pub struct VmoIdWrapper {
    // Used on drop to detach `vmo_id` from the underlying device.  Weak so this wrapper does
    // not keep the partition alive on its own.
    partition: Weak<GptPartition>,
    // The vmoid registered with the underlying device; accessible via `Deref`.
    vmo_id: VmoId,
}
24
25impl std::ops::Deref for VmoIdWrapper {
26    type Target = VmoId;
27    fn deref(&self) -> &Self::Target {
28        &self.vmo_id
29    }
30}
31
32impl Drop for VmoIdWrapper {
33    fn drop(&mut self) {
34        // Turn it into an ID so that if the spawned task is dropped, the assertion in VmoId::drop
35        // doesn't fire.  It will mean the ID is leaked, but it's most likely that the server is
36        // being shut down anyway so it shouldn't matter.
37        let vmo_id = self.vmo_id.take().into_id();
38        if let Some(partition) = self.partition.upgrade() {
39            fasync::Task::spawn(async move {
40                if let Err(e) = partition.detach_vmo(VmoId::new(vmo_id)).await {
41                    log::error!("detach_vmo failed: {:?}", e);
42                }
43            })
44            .detach();
45        }
46    }
47}
48
/// PartitionBackend is an implementation of block_server's Interface which is backed by a windowed
/// view of the underlying GPT device.
pub struct PartitionBackend {
    // The windowed view of the GPT device that all I/O is forwarded to.
    partition: Arc<GptPartition>,
    // Maps the address of an attached VMO (keyed in `on_attach_vmo`) to its registered vmoid.
    // Entries are removed in `on_detach_vmo`, but the vmoid is only detached from the device
    // once the last strong reference to the wrapper is dropped.
    vmo_keys_to_vmoids_map: Mutex<BTreeMap<usize, Arc<VmoIdWrapper>>>,
    // If set, sessions that need no offset translation are proxied straight through to the
    // underlying device (see `open_session`).
    passthrough: bool,
}
56
57impl block_server::async_interface::Interface for PartitionBackend {
58    async fn open_session(
59        &self,
60        session_manager: Arc<SessionManager<Self>>,
61        stream: fblock::SessionRequestStream,
62        offset_map: OffsetMap,
63        block_size: u32,
64    ) -> Result<(), Error> {
65        if !self.passthrough || !offset_map.is_empty() {
66            // For now, we don't support double-passthrough.  We could as needed for nested GPT.
67            // If we support this, we can remove I/O and vmoid management from this struct.
68            return session_manager.serve_session(stream, offset_map, block_size).await;
69        }
70        let (proxy, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
71        self.partition.open_passthrough_session(server_end);
72        let passthrough = PassthroughSession::new(proxy);
73        passthrough.serve(stream).await
74    }
75
76    async fn on_attach_vmo(&self, vmo: &zx::Vmo) -> Result<(), zx::Status> {
77        let key = std::ptr::from_ref(vmo) as usize;
78        let vmo_id = self.partition.attach_vmo(vmo).await?;
79        self.vmo_keys_to_vmoids_map.lock().insert(
80            key,
81            Arc::new(VmoIdWrapper { partition: Arc::downgrade(&self.partition), vmo_id }),
82        );
83        Ok(())
84    }
85
86    fn on_detach_vmo(&self, vmo: &zx::Vmo) {
87        // Note that we will not immediately detach the VMO.  This happens when the last reference
88        // to it is dropped (in [`VmoIdWrapper::drop`]).
89        let key = std::ptr::from_ref(vmo) as usize;
90        self.vmo_keys_to_vmoids_map.lock().remove(&key);
91    }
92
93    fn get_info(&self) -> Cow<'_, DeviceInfo> {
94        Cow::Owned(self.partition.get_info())
95    }
96
97    async fn read(
98        &self,
99        device_block_offset: u64,
100        block_count: u32,
101        vmo: &Arc<zx::Vmo>,
102        vmo_offset: u64, // *bytes* not blocks
103        opts: ReadOptions,
104        trace_flow_id: Option<NonZero<u64>>,
105    ) -> Result<(), zx::Status> {
106        let vmo_id = self.get_vmoid(vmo)?;
107        self.partition
108            .read(device_block_offset, block_count, &vmo_id, vmo_offset, opts, trace_flow_id)
109            .await
110    }
111
112    async fn write(
113        &self,
114        device_block_offset: u64,
115        length: u32,
116        vmo: &Arc<zx::Vmo>,
117        vmo_offset: u64, // *bytes* not blocks
118        opts: WriteOptions,
119        trace_flow_id: Option<NonZero<u64>>,
120    ) -> Result<(), zx::Status> {
121        let vmo_id = self.get_vmoid(vmo)?;
122        self.partition
123            .write(device_block_offset, length, &vmo_id, vmo_offset, opts, trace_flow_id)
124            .await
125    }
126
127    async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
128        self.partition.flush(trace_flow_id).await
129    }
130
131    async fn trim(
132        &self,
133        device_block_offset: u64,
134        block_count: u32,
135        trace_flow_id: Option<NonZero<u64>>,
136    ) -> Result<(), zx::Status> {
137        self.partition.trim(device_block_offset, block_count, trace_flow_id).await
138    }
139}
140
141impl PartitionBackend {
142    #[cfg(test)]
143    pub fn vmo_count(&self) -> usize {
144        self.vmo_keys_to_vmoids_map.lock().len()
145    }
146
147    pub fn new(partition: Arc<GptPartition>, passthrough: bool) -> Arc<Self> {
148        Arc::new(Self {
149            partition,
150            vmo_keys_to_vmoids_map: Mutex::new(BTreeMap::new()),
151            passthrough,
152        })
153    }
154
155    /// Returns the old info.
156    pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
157        self.partition.update_info(info)
158    }
159
160    fn get_vmoid(&self, vmo: &zx::Vmo) -> Result<Arc<VmoIdWrapper>, zx::Status> {
161        let key = std::ptr::from_ref(vmo) as usize;
162        self.vmo_keys_to_vmoids_map.lock().get(&key).map(Arc::clone).ok_or(zx::Status::NOT_FOUND)
163    }
164}