// gpt_component/gpt.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5use crate::partition::PartitionBackend;
6use crate::partitions_directory::PartitionsDirectory;
7use anyhow::{anyhow, Context as _, Error};
8use block_client::{
9    BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, VmoId, WriteOptions,
10};
11use block_server::async_interface::SessionManager;
12use block_server::BlockServer;
13
14use fidl::endpoints::ServerEnd;
15use futures::lock::Mutex;
16use futures::stream::TryStreamExt as _;
17use std::collections::BTreeMap;
18use std::num::NonZero;
19use std::ops::Range;
20use std::sync::atomic::{AtomicBool, Ordering};
21use std::sync::{Arc, Weak};
22use zx::AsHandleRef as _;
23use {
24    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
25    fuchsia_async as fasync,
26};
27
/// Returns the directory entry name under which partition `index` is
/// published, e.g. `part-007` (zero-padded to at least three digits).
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{index:03}")
}
31
/// A single partition in a GPT device.
pub struct GptPartition {
    // Back-reference to the owning manager; weak so a partition does not keep
    // the `GptManager` alive after shutdown.
    gpt: Weak<GptManager>,
    // Connection to the underlying (whole-disk) block device.
    block_client: Arc<RemoteBlockClient>,
    // Absolute block range this partition occupies on the underlying device.
    block_range: Range<u64>,
    // Index of this partition in the GPT.
    index: u32,
}
39
/// Extracts the raw trace flow id, defaulting to 0 when no id was provided.
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    match trace_flow_id {
        Some(id) => id.get(),
        None => 0,
    }
}
43
44impl GptPartition {
45    pub fn new(
46        gpt: &Arc<GptManager>,
47        block_client: Arc<RemoteBlockClient>,
48        index: u32,
49        block_range: Range<u64>,
50    ) -> Arc<Self> {
51        debug_assert!(block_range.end >= block_range.start);
52        Arc::new(Self { gpt: Arc::downgrade(gpt), block_client, block_range, index })
53    }
54
55    pub async fn terminate(&self) {
56        if let Err(error) = self.block_client.close().await {
57            log::warn!(error:?; "Failed to close block client");
58        }
59    }
60
61    pub fn index(&self) -> u32 {
62        self.index
63    }
64
65    pub fn block_size(&self) -> u32 {
66        self.block_client.block_size()
67    }
68
69    pub fn block_count(&self) -> u64 {
70        self.block_range.end - self.block_range.start
71    }
72
73    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
74        self.block_client.attach_vmo(vmo).await
75    }
76
77    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
78        self.block_client.detach_vmo(vmoid).await
79    }
80
81    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
82        if let Some(gpt) = self.gpt.upgrade() {
83            let mappings = [fblock::BlockOffsetMapping {
84                source_block_offset: 0,
85                target_block_offset: self.block_range.start,
86                length: self.block_count(),
87            }];
88            if let Err(err) =
89                gpt.block_proxy.open_session_with_offset_map(session, None, Some(&mappings[..]))
90            {
91                // Client errors normally come back on `session` but that was already consumed.  The
92                // client will get a PEER_CLOSED without an epitaph.
93                log::warn!(err:?; "Failed to open passthrough session");
94            }
95        } else {
96            if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
97                log::warn!(err:?; "Failed to send session epitaph");
98            }
99        }
100    }
101
102    pub async fn get_info(&self) -> Result<block_server::DeviceInfo, zx::Status> {
103        if let Some(gpt) = self.gpt.upgrade() {
104            gpt.inner
105                .lock()
106                .await
107                .gpt
108                .partitions()
109                .get(&self.index)
110                .map(|info| {
111                    convert_partition_info(
112                        info,
113                        self.block_client.block_flags(),
114                        self.block_client.max_transfer_blocks(),
115                    )
116                })
117                .ok_or(zx::Status::BAD_STATE)
118        } else {
119            Err(zx::Status::BAD_STATE)
120        }
121    }
122
123    pub async fn read(
124        &self,
125        device_block_offset: u64,
126        block_count: u32,
127        vmo_id: &VmoId,
128        vmo_offset: u64, // *bytes* not blocks
129        trace_flow_id: Option<NonZero<u64>>,
130    ) -> Result<(), zx::Status> {
131        let dev_offset = self
132            .absolute_offset(device_block_offset, block_count)
133            .map(|offset| offset * self.block_size() as u64)?;
134        let buffer = MutableBufferSlice::new_with_vmo_id(
135            vmo_id,
136            vmo_offset,
137            (block_count * self.block_size()) as u64,
138        );
139        self.block_client.read_at_traced(buffer, dev_offset, trace_id(trace_flow_id)).await
140    }
141
142    pub fn barrier(&self) {
143        self.block_client.barrier();
144    }
145
146    pub async fn write(
147        &self,
148        device_block_offset: u64,
149        block_count: u32,
150        vmo_id: &VmoId,
151        vmo_offset: u64, // *bytes* not blocks
152        opts: WriteOptions,
153        trace_flow_id: Option<NonZero<u64>>,
154    ) -> Result<(), zx::Status> {
155        let dev_offset = self
156            .absolute_offset(device_block_offset, block_count)
157            .map(|offset| offset * self.block_size() as u64)?;
158        let buffer = BufferSlice::new_with_vmo_id(
159            vmo_id,
160            vmo_offset,
161            (block_count * self.block_size()) as u64,
162        );
163        self.block_client
164            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
165            .await
166    }
167
168    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
169        self.block_client.flush_traced(trace_id(trace_flow_id)).await
170    }
171
172    pub async fn trim(
173        &self,
174        device_block_offset: u64,
175        block_count: u32,
176        trace_flow_id: Option<NonZero<u64>>,
177    ) -> Result<(), zx::Status> {
178        let dev_offset = self
179            .absolute_offset(device_block_offset, block_count)
180            .map(|offset| offset * self.block_size() as u64)?;
181        let len = block_count as u64 * self.block_size() as u64;
182        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
183    }
184
185    // Converts a relative range specified by [offset, offset+len) into an absolute offset in the
186    // GPT device, performing bounds checking within the partition.  Returns ZX_ERR_OUT_OF_RANGE for
187    // an invalid offset/len.
188    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
189        offset = offset.checked_add(self.block_range.start).ok_or(zx::Status::OUT_OF_RANGE)?;
190        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
191        if end > self.block_range.end {
192            Err(zx::Status::OUT_OF_RANGE)
193        } else {
194            Ok(offset)
195        }
196    }
197}
198
199fn convert_partition_info(
200    info: &gpt::PartitionInfo,
201    device_flags: fblock::Flag,
202    max_transfer_blocks: Option<NonZero<u32>>,
203) -> block_server::DeviceInfo {
204    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
205        device_flags,
206        max_transfer_blocks,
207        block_range: Some(info.start_block..info.start_block + info.num_blocks),
208        type_guid: info.type_guid.to_bytes(),
209        instance_guid: info.instance_guid.to_bytes(),
210        name: info.label.clone(),
211        flags: info.flags,
212    })
213}
214
/// State for an in-flight GPT metadata transaction.  Cleared when committed or
/// when the client's transaction token is closed.
struct PendingTransaction {
    transaction: gpt::Transaction,
    // Koid of the client end of the transaction token; used to check that
    // requests reference the currently pending transaction.
    client_koid: zx::Koid,
    // A list of indexes for partitions which were added in the transaction.  When committing, all
    // newly created partitions are published.
    added_partitions: Vec<u32>,
    // A task which waits for the client end to be closed and clears the pending transaction.
    _signal_task: fasync::Task<()>,
}
224
/// Mutable state of a `GptManager`, guarded by its async mutex.
struct Inner {
    // The parsed GPT plus a connection to the underlying device.
    gpt: gpt::Gpt,
    // Block servers for each bound partition, keyed by GPT partition index.
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Exposes all partitions for discovery by other components.  Should be kept in sync with
    // `partitions`.
    partitions_dir: PartitionsDirectory,
    // At most one metadata transaction may be pending at a time.
    pending_transaction: Option<PendingTransaction>,
}
233
234impl Inner {
235    /// Ensures that `transaction` matches our pending transaction.
236    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
237        if let Some(pending) = self.pending_transaction.as_ref() {
238            if transaction.get_koid()? == pending.client_koid {
239                Ok(())
240            } else {
241                Err(zx::Status::BAD_HANDLE)
242            }
243        } else {
244            Err(zx::Status::BAD_STATE)
245        }
246    }
247
248    async fn bind_partition(
249        &mut self,
250        parent: &Arc<GptManager>,
251        index: u32,
252        info: gpt::PartitionInfo,
253    ) -> Result<(), Error> {
254        log::info!("GPT part {index}: {info:?}");
255        let partition = PartitionBackend::new(GptPartition::new(
256            parent,
257            self.gpt.client().clone(),
258            index,
259            info.start_block
260                ..info
261                    .start_block
262                    .checked_add(info.num_blocks)
263                    .ok_or_else(|| anyhow!("Overflow in partition range"))?,
264        ));
265        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
266        self.partitions_dir.add_entry(
267            &partition_directory_entry_name(index),
268            Arc::downgrade(&block_server),
269            Arc::downgrade(parent),
270            index as usize,
271        );
272        self.partitions.insert(index, block_server);
273        Ok(())
274    }
275
276    async fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
277        self.partitions.clear();
278        self.partitions_dir.clear();
279        for (index, info) in self.gpt.partitions().clone() {
280            self.bind_partition(parent, index, info).await?;
281        }
282        Ok(())
283    }
284
285    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
286        let pending = self.pending_transaction.as_mut().unwrap();
287        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
288        pending.added_partitions.push(idx as u32);
289        Ok(idx)
290    }
291}
292
/// Runs a GPT device.
pub struct GptManager {
    // Proxy to the underlying whole-disk block device.
    block_proxy: fblock::BlockProxy,
    // Geometry of the underlying device, cached when the manager is created.
    block_size: u32,
    block_count: u64,
    // All mutable state, guarded by an async mutex.
    inner: Mutex<Inner>,
    // Set by `shutdown`; checked in `Drop` to catch a missing shutdown call.
    shutdown: AtomicBool,
}
301
302impl std::fmt::Debug for GptManager {
303    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
304        f.debug_struct("GptManager")
305            .field("block_size", &self.block_size)
306            .field("block_count", &self.block_count)
307            .finish()
308    }
309}
310
311impl GptManager {
312    pub async fn new(
313        block_proxy: fblock::BlockProxy,
314        partitions_dir: Arc<vfs::directory::immutable::Simple>,
315    ) -> Result<Arc<Self>, Error> {
316        log::info!("Binding to GPT");
317        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
318        let block_size = client.block_size();
319        let block_count = client.block_count();
320        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;
321
322        let this = Arc::new(Self {
323            block_proxy,
324            block_size,
325            block_count,
326            inner: Mutex::new(Inner {
327                gpt,
328                partitions: BTreeMap::new(),
329                partitions_dir: PartitionsDirectory::new(partitions_dir),
330                pending_transaction: None,
331            }),
332            shutdown: AtomicBool::new(false),
333        });
334        log::info!("Bind to GPT OK, binding partitions");
335        this.inner.lock().await.bind_all_partitions(&this).await?;
336        log::info!("Starting all partitions OK!");
337        Ok(this)
338    }
339
340    pub fn block_size(&self) -> u32 {
341        self.block_size
342    }
343
344    pub fn block_count(&self) -> u64 {
345        self.block_count
346    }
347
348    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
349        let mut inner = self.inner.lock().await;
350        if inner.pending_transaction.is_some() {
351            return Err(zx::Status::ALREADY_EXISTS);
352        }
353        let transaction = inner.gpt.create_transaction().unwrap();
354        let (client_end, server_end) = zx::EventPair::create();
355        let client_koid = client_end.get_koid()?;
356        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
357        let this = self.clone();
358        let task = fasync::Task::spawn(async move {
359            let _ = signal_waiter.await;
360            let mut inner = this.inner.lock().await;
361            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
362                inner.pending_transaction = None;
363            }
364        });
365        inner.pending_transaction = Some(PendingTransaction {
366            transaction,
367            client_koid,
368            added_partitions: vec![],
369            _signal_task: task,
370        });
371        Ok(client_end)
372    }
373
374    pub async fn commit_transaction(
375        self: &Arc<Self>,
376        transaction: zx::EventPair,
377    ) -> Result<(), zx::Status> {
378        let mut inner = self.inner.lock().await;
379        inner.ensure_transaction_matches(&transaction)?;
380        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
381        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
382            log::error!(err:?; "Failed to commit transaction");
383            return Err(zx::Status::IO);
384        }
385        for idx in pending.added_partitions {
386            let info = inner.gpt.partitions().get(&idx).ok_or(zx::Status::BAD_STATE)?.clone();
387            inner.bind_partition(self, idx, info).await.map_err(|err| {
388                log::error!(err:?; "Failed to bind partition");
389                zx::Status::BAD_STATE
390            })?;
391        }
392        Ok(())
393    }
394
395    pub async fn add_partition(
396        &self,
397        request: fpartitions::PartitionsManagerAddPartitionRequest,
398    ) -> Result<(), zx::Status> {
399        let mut inner = self.inner.lock().await;
400        inner.ensure_transaction_matches(
401            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
402        )?;
403        let info = gpt::PartitionInfo {
404            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
405            type_guid: request
406                .type_guid
407                .map(|value| gpt::Guid::from_bytes(value.value))
408                .ok_or(zx::Status::INVALID_ARGS)?,
409            instance_guid: request
410                .instance_guid
411                .map(|value| gpt::Guid::from_bytes(value.value))
412                .unwrap_or_else(|| gpt::Guid::generate()),
413            start_block: 0,
414            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
415            flags: request.flags.unwrap_or_default(),
416        };
417        let idx = inner.add_partition(info)?;
418        let partition =
419            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
420        log::info!(
421            "Allocated partition {:?} at {:?}",
422            partition.label,
423            partition.start_block..partition.start_block + partition.num_blocks
424        );
425        Ok(())
426    }
427
428    pub async fn handle_partitions_requests(
429        &self,
430        gpt_index: usize,
431        mut requests: fpartitions::PartitionRequestStream,
432    ) -> Result<(), zx::Status> {
433        while let Some(request) = requests.try_next().await.unwrap() {
434            match request {
435                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
436                    responder
437                        .send(
438                            self.update_partition_metadata(gpt_index, payload)
439                                .await
440                                .map_err(|status| status.into_raw()),
441                        )
442                        .unwrap_or_else(
443                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
444                        );
445                }
446            }
447        }
448        Ok(())
449    }
450
451    async fn update_partition_metadata(
452        &self,
453        gpt_index: usize,
454        request: fpartitions::PartitionUpdateMetadataRequest,
455    ) -> Result<(), zx::Status> {
456        let mut inner = self.inner.lock().await;
457        inner.ensure_transaction_matches(
458            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
459        )?;
460
461        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
462        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
463        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
464            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
465        }
466        if let Some(flags) = request.flags.as_ref() {
467            entry.flags = *flags;
468        }
469        Ok(())
470    }
471
472    pub async fn reset_partition_table(
473        self: &Arc<Self>,
474        partitions: Vec<gpt::PartitionInfo>,
475    ) -> Result<(), zx::Status> {
476        let mut inner = self.inner.lock().await;
477        if inner.pending_transaction.is_some() {
478            return Err(zx::Status::BAD_STATE);
479        }
480
481        log::info!("Resetting gpt.  Expect data loss!!!");
482        let mut transaction = inner.gpt.create_transaction().unwrap();
483        transaction.partitions = partitions;
484        inner.gpt.commit_transaction(transaction).await?;
485
486        log::info!("Rebinding partitions...");
487        if let Err(err) = inner.bind_all_partitions(&self).await {
488            log::error!(err:?; "Failed to rebind partitions");
489            return Err(zx::Status::BAD_STATE);
490        }
491        log::info!("Rebinding partitions OK!");
492        Ok(())
493    }
494
495    pub async fn shutdown(self: Arc<Self>) {
496        log::info!("Shutting down gpt");
497        let mut inner = self.inner.lock().await;
498        inner.partitions_dir.clear();
499        inner.partitions.clear();
500        self.shutdown.store(true, Ordering::Relaxed);
501        log::info!("Shutting down gpt OK");
502    }
503}
504
impl Drop for GptManager {
    fn drop(&mut self) {
        // `shutdown()` must run before the manager is dropped so partitions and
        // directory entries are torn down; dropping without it is a bug.
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}
510
511#[cfg(test)]
512mod tests {
513    use super::GptManager;
514    use block_client::{BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient};
515    use block_server::WriteOptions;
516    use fake_block_server::{FakeServer, FakeServerOptions};
517    use fidl::HandleBased as _;
518    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
519    use gpt::{Gpt, Guid, PartitionInfo};
520    use std::num::NonZero;
521    use std::sync::atomic::{AtomicBool, Ordering};
522    use std::sync::Arc;
523    use {
524        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
525        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
526        fuchsia_async as fasync,
527    };
528
    /// Creates a fake block device of `block_count` x `block_size` formatted
    /// with a GPT containing `partitions`, plus a fresh directory into which
    /// the partitions can be published.
    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            FakeServerOptions { block_count: Some(block_count), block_size, ..Default::default() },
            partitions,
        )
        .await
    }
540
    /// Like `setup`, but with full control over the fake server's options.
    async fn setup_with_options(
        opts: FakeServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(FakeServer::from(opts));
        {
            // Temporarily serve the fake device so it can be formatted with a GPT;
            // the connection and serving task are dropped at the end of the scope.
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }
560
    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        // A blank device contains no GPT, so binding must fail.
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(FakeServer::from_vmo(512, vmo));

        GptManager::new(server.block_proxy(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }
570
    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        // A valid GPT with zero partitions should still bind successfully.
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }
580
    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        // Exactly one entry should be published: part-000.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
609
    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        // Exactly two entries should be published: part-000 and part-001.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
651
    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // One 2-block partition starting at block 4 of an 8-block device.
        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        // Connect to the published partition's volume protocol.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // The client should see the partition's geometry, not the whole device's.
        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        // I/O within the partition succeeds; I/O past its end must be rejected.
        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Ensure writes persisted to the partition.  Partition block 0 is device
        // block 4, i.e. byte offset 2048.
        let mut buf = vec![0u8; 512];
        let client = RemoteBlockClient::new(block_device.block_proxy()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }
714
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Clobber block 1 (byte offset 512), where the primary GPT header lives.
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        // Loading should still succeed — presumably via the backup copy of the
        // GPT (behavior lives in the `gpt` crate; confirm there).
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
762
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Clobber block 2 (byte offset 1024), where the primary partition
            // table entries live.
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        // Loading should still succeed — presumably via the backup copy of the
        // GPT (behavior lives in the `gpt` crate; confirm there).
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
810
811    #[fuchsia::test]
812    async fn force_access_passed_through() {
813        const BLOCK_SIZE: u32 = 512;
814        const BLOCK_COUNT: u64 = 1024;
815
816        struct Observer(Arc<AtomicBool>);
817
818        impl fake_block_server::Observer for Observer {
819            fn write(
820                &self,
821                _device_block_offset: u64,
822                _block_count: u32,
823                _vmo: &Arc<zx::Vmo>,
824                _vmo_offset: u64,
825                opts: WriteOptions,
826            ) -> fake_block_server::WriteAction {
827                assert_eq!(
828                    opts.contains(WriteOptions::FORCE_ACCESS),
829                    self.0.load(Ordering::Relaxed)
830                );
831                fake_block_server::WriteAction::Write
832            }
833        }
834
835        let expect_force_access = Arc::new(AtomicBool::new(false));
836        let (server, partitions_dir) = setup_with_options(
837            FakeServerOptions {
838                block_count: Some(BLOCK_COUNT),
839                block_size: BLOCK_SIZE,
840                observer: Some(Box::new(Observer(expect_force_access.clone()))),
841                ..Default::default()
842            },
843            vec![PartitionInfo {
844                label: "foo".to_string(),
845                type_guid: Guid::from_bytes([1; 16]),
846                instance_guid: Guid::from_bytes([2; 16]),
847                start_block: 4,
848                num_blocks: 1,
849                flags: 0,
850            }],
851        )
852        .await;
853
854        let manager = GptManager::new(server.block_proxy(), partitions_dir.clone()).await.unwrap();
855
856        let proxy = vfs::serve_directory(
857            partitions_dir.clone(),
858            vfs::path::Path::validate_and_split("part-000").unwrap(),
859            fio::PERM_READABLE,
860        );
861        let block =
862            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
863                .expect("Failed to open block service");
864        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");
865
866        let buffer = vec![0; BLOCK_SIZE as usize];
867        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
868
869        expect_force_access.store(true, Ordering::Relaxed);
870
871        client
872            .write_at_with_opts(BufferSlice::Memory(&buffer), 0, WriteOptions::FORCE_ACCESS)
873            .await
874            .unwrap();
875
876        manager.shutdown().await;
877    }
878
879    #[fuchsia::test]
880    async fn barrier_passed_through() {
881        const BLOCK_SIZE: u32 = 512;
882        const BLOCK_COUNT: u64 = 1024;
883
884        struct Observer(Arc<AtomicBool>);
885
886        impl fake_block_server::Observer for Observer {
887            fn barrier(&self) {
888                self.0.store(true, Ordering::Relaxed);
889            }
890        }
891
892        let expect_barrier = Arc::new(AtomicBool::new(false));
893        let (server, partitions_dir) = setup_with_options(
894            FakeServerOptions {
895                block_count: Some(BLOCK_COUNT),
896                block_size: BLOCK_SIZE,
897                observer: Some(Box::new(Observer(expect_barrier.clone()))),
898                ..Default::default()
899            },
900            vec![PartitionInfo {
901                label: "foo".to_string(),
902                type_guid: Guid::from_bytes([1; 16]),
903                instance_guid: Guid::from_bytes([2; 16]),
904                start_block: 4,
905                num_blocks: 1,
906                flags: 0,
907            }],
908        )
909        .await;
910
911        let manager = GptManager::new(server.block_proxy(), partitions_dir.clone()).await.unwrap();
912
913        let proxy = vfs::serve_directory(
914            partitions_dir.clone(),
915            vfs::path::Path::validate_and_split("part-000").unwrap(),
916            fio::PERM_READABLE,
917        );
918        let block =
919            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
920                .expect("Failed to open block service");
921        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");
922
923        let buffer = vec![0; BLOCK_SIZE as usize];
924        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
925
926        client.barrier();
927        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
928
929        assert!(expect_barrier.load(Ordering::Relaxed));
930
931        manager.shutdown().await;
932    }
933
    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        // Format a 16-block device with two one-block partitions.
        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        // Open the Partition service of each published partition.
        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        // Stage two metadata updates in one transaction: a new type GUID for
        // partition 0 and new flags for partition 1.  Each update_metadata call
        // carries a duplicate of the transaction token.
        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        // Ensure the changes have propagated to the correct partitions.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        // Partition 1's type GUID was untouched by the transaction; only its flags changed.
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }
1030
    #[fuchsia::test]
    async fn reset_partition_tables() {
        // The test will reset the tables from ["part", "part2"] to
        // ["part3", <empty>, "part4", <125 empty entries>].
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        // 1 MiB device (2048 blocks of 512 bytes) — large enough for a full
        // 128-entry partition table, unlike the "too many partitions" test below.
        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        // Zeroed placeholder for the unused table slots.
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        // Populate entries 0 and 2, deliberately leaving entry 1 empty so the
        // published directory has a hole at part-001.
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        // Directory entries track table slots: 0 and 2 exist, 1 must not.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        // Spot-check that slot 0 now serves the new "part3" partition.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }
1113
1114    #[fuchsia::test]
1115    async fn reset_partition_tables_fails_if_too_many_partitions() {
1116        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
1117        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1118            .await
1119            .expect("load should succeed");
1120        let nil_entry = PartitionInfo {
1121            label: "".to_string(),
1122            type_guid: Guid::from_bytes([0u8; 16]),
1123            instance_guid: Guid::from_bytes([0u8; 16]),
1124            start_block: 0,
1125            num_blocks: 0,
1126            flags: 0,
1127        };
1128        let new_partitions = vec![nil_entry; 128];
1129        runner
1130            .reset_partition_table(new_partitions)
1131            .await
1132            .expect_err("reset_partition_table should fail");
1133
1134        runner.shutdown().await;
1135    }
1136
1137    #[fuchsia::test]
1138    async fn reset_partition_tables_fails_if_too_large_partitions() {
1139        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1140        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1141            .await
1142            .expect("load should succeed");
1143        let new_partitions = vec![
1144            PartitionInfo {
1145                label: "a".to_string(),
1146                type_guid: Guid::from_bytes([1u8; 16]),
1147                instance_guid: Guid::from_bytes([1u8; 16]),
1148                start_block: 4,
1149                num_blocks: 2,
1150                flags: 0,
1151            },
1152            PartitionInfo {
1153                label: "b".to_string(),
1154                type_guid: Guid::from_bytes([2u8; 16]),
1155                instance_guid: Guid::from_bytes([2u8; 16]),
1156                start_block: 6,
1157                num_blocks: 200,
1158                flags: 0,
1159            },
1160        ];
1161        runner
1162            .reset_partition_table(new_partitions)
1163            .await
1164            .expect_err("reset_partition_table should fail");
1165
1166        runner.shutdown().await;
1167    }
1168
1169    #[fuchsia::test]
1170    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
1171        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1172        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1173            .await
1174            .expect("load should succeed");
1175        let new_partitions = vec![PartitionInfo {
1176            label: "a".to_string(),
1177            type_guid: Guid::from_bytes([1u8; 16]),
1178            instance_guid: Guid::from_bytes([1u8; 16]),
1179            start_block: 1,
1180            num_blocks: 2,
1181            flags: 0,
1182        }];
1183        runner
1184            .reset_partition_table(new_partitions)
1185            .await
1186            .expect_err("reset_partition_table should fail");
1187
1188        runner.shutdown().await;
1189    }
1190
1191    #[fuchsia::test]
1192    async fn reset_partition_tables_fails_if_partitions_overlap() {
1193        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1194        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1195            .await
1196            .expect("load should succeed");
1197        let new_partitions = vec![
1198            PartitionInfo {
1199                label: "a".to_string(),
1200                type_guid: Guid::from_bytes([1u8; 16]),
1201                instance_guid: Guid::from_bytes([1u8; 16]),
1202                start_block: 32,
1203                num_blocks: 2,
1204                flags: 0,
1205            },
1206            PartitionInfo {
1207                label: "b".to_string(),
1208                type_guid: Guid::from_bytes([2u8; 16]),
1209                instance_guid: Guid::from_bytes([2u8; 16]),
1210                start_block: 33,
1211                num_blocks: 1,
1212                flags: 0,
1213            },
1214        ];
1215        runner
1216            .reset_partition_table(new_partitions)
1217            .await
1218            .expect_err("reset_partition_table should fail");
1219
1220        runner.shutdown().await;
1221    }
1222
1223    #[fuchsia::test]
1224    async fn add_partition() {
1225        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
1226        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1227            .await
1228            .expect("load should succeed");
1229
1230        let transaction = runner.create_transaction().await.expect("Create transaction failed");
1231        let request = fpartitions::PartitionsManagerAddPartitionRequest {
1232            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1233            name: Some("a".to_string()),
1234            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
1235            num_blocks: Some(2),
1236            ..Default::default()
1237        };
1238        runner.add_partition(request).await.expect("add_partition failed");
1239        runner.commit_transaction(transaction).await.expect("add_partition failed");
1240
1241        let proxy = vfs::serve_directory(
1242            partitions_dir.clone(),
1243            vfs::path::Path::validate_and_split("part-000").unwrap(),
1244            fio::PERM_READABLE,
1245        );
1246        let block =
1247            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
1248                .expect("Failed to open block service");
1249        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");
1250
1251        assert_eq!(client.block_count(), 2);
1252        assert_eq!(client.block_size(), 512);
1253
1254        runner.shutdown().await;
1255    }
1256
    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // Give the fake device distinctive properties (transfer limit and flags) so
        // we can check they are surfaced unchanged through the partition's Volume.
        let (block_device, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(8),
                block_size: 512,
                max_transfer_blocks: NonZero::new(2),
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        // get_info reflects the partition's extent plus the device's own properties;
        // max_transfer_size is max_transfer_blocks * block_size = 2 * 512.
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(info.flags, fblock::Flag::READONLY | fblock::Flag::REMOVABLE);
        assert_eq!(info.max_transfer_size, 1024);

        // get_metadata reflects the GPT entry exactly as formatted above.
        let metadata =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }
1312
    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // Back the fake device with a VMO we keep a handle to; the REFERENCE child
        // shares the same pages, letting us inspect the raw bytes at the end.
        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        // Outer GPT: one 16-block partition starting at block 4 of the device.
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            FakeServerOptions {
                vmo: Some(vmo_clone),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.block_proxy(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        // Format a GPT *inside* the outer partition, with a one-block partition at
        // block 5 (relative to the outer partition's start).
        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        // Run a second GptManager on top of the outer partition's block protocol.
        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        // The inner partition is one block: a write at 0 succeeds, a write at 512
        // would land past its end and must be rejected.
        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // Check that the write targeted the correct block (4 + 5 = 9)
        let data = vmo.read_to_vec(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }
1402
1403    #[fuchsia::test]
1404    async fn offset_map_does_not_allow_partition_overwrite() {
1405        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1406        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1407        const PART_NAME: &str = "part";
1408
1409        let (block_device, partitions_dir) = setup_with_options(
1410            FakeServerOptions {
1411                block_count: Some(16),
1412                block_size: 512,
1413                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
1414                ..Default::default()
1415            },
1416            vec![PartitionInfo {
1417                label: PART_NAME.to_string(),
1418                type_guid: Guid::from_bytes(PART_TYPE_GUID),
1419                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
1420                start_block: 4,
1421                num_blocks: 2,
1422                flags: 0xabcd,
1423            }],
1424        )
1425        .await;
1426
1427        let partitions_dir_clone = partitions_dir.clone();
1428        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
1429            .await
1430            .expect("load should succeed");
1431
1432        let part_dir = vfs::serve_directory(
1433            partitions_dir.clone(),
1434            vfs::path::Path::validate_and_split("part-000").unwrap(),
1435            fio::PERM_READABLE,
1436        );
1437
1438        let part_block =
1439            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
1440                .expect("Failed to open Block service");
1441
1442        // Attempting to open a session with an offset map that extends past the end of the device
1443        // should fail.
1444        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
1445        part_block
1446            .open_session_with_offset_map(
1447                server_end,
1448                None,
1449                Some(&[fblock::BlockOffsetMapping {
1450                    source_block_offset: 0,
1451                    target_block_offset: 1,
1452                    length: 2,
1453                }]),
1454            )
1455            .expect("FIDL error");
1456        session.get_fifo().await.expect_err("Session should be closed");
1457
1458        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
1459        part_block
1460            .open_session_with_offset_map(
1461                server_end,
1462                None,
1463                Some(&[fblock::BlockOffsetMapping {
1464                    source_block_offset: 0,
1465                    target_block_offset: 0,
1466                    length: 3,
1467                }]),
1468            )
1469            .expect("FIDL error");
1470        session.get_fifo().await.expect_err("Session should be closed");
1471
1472        runner.shutdown().await;
1473    }
1474}