gpt_component/
gpt.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::partition::PartitionBackend;
6use crate::partitions_directory::PartitionsDirectory;
7use anyhow::{anyhow, Context as _, Error};
8use block_client::{
9    BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, VmoId, WriteOptions,
10};
11use block_server::async_interface::SessionManager;
12use block_server::BlockServer;
13
14use fidl::endpoints::ServerEnd;
15use futures::lock::Mutex;
16use futures::stream::TryStreamExt as _;
17use std::collections::BTreeMap;
18use std::num::NonZero;
19use std::ops::Range;
20use std::sync::atomic::{AtomicBool, Ordering};
21use std::sync::{Arc, Weak};
22use zx::AsHandleRef as _;
23use {
24    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
25    fuchsia_async as fasync,
26};
27
/// Returns the directory entry name under which partition `index` is published, e.g. "part-003".
fn partition_directory_entry_name(index: u32) -> String {
    // Zero-pad to at least three digits so typical indices sort lexicographically.
    format!("part-{index:03}")
}
31
/// A single partition in a GPT device.
pub struct GptPartition {
    // Back-reference to the owning manager; weak so a partition does not keep the manager alive.
    gpt: Weak<GptManager>,
    // Connection to the underlying (whole-device) block client.
    block_client: Arc<RemoteBlockClient>,
    // This partition's extent within the underlying device, in blocks.
    block_range: Range<u64>,
    // Index of this partition in the GPT; used to look up its metadata via the manager.
    index: u32,
}
39
/// Unwraps an optional trace flow id to its raw value, treating `None` as 0.
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    match trace_flow_id {
        Some(id) => id.get(),
        None => 0,
    }
}
43
44impl GptPartition {
45    pub fn new(
46        gpt: &Arc<GptManager>,
47        block_client: Arc<RemoteBlockClient>,
48        index: u32,
49        block_range: Range<u64>,
50    ) -> Arc<Self> {
51        debug_assert!(block_range.end >= block_range.start);
52        Arc::new(Self { gpt: Arc::downgrade(gpt), block_client, block_range, index })
53    }
54
55    pub async fn terminate(&self) {
56        if let Err(error) = self.block_client.close().await {
57            log::warn!(error:?; "Failed to close block client");
58        }
59    }
60
61    pub fn index(&self) -> u32 {
62        self.index
63    }
64
65    pub fn block_size(&self) -> u32 {
66        self.block_client.block_size()
67    }
68
69    pub fn block_count(&self) -> u64 {
70        self.block_range.end - self.block_range.start
71    }
72
73    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
74        self.block_client.attach_vmo(vmo).await
75    }
76
77    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
78        self.block_client.detach_vmo(vmoid).await
79    }
80
81    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
82        if let Some(gpt) = self.gpt.upgrade() {
83            let mappings = [fblock::BlockOffsetMapping {
84                source_block_offset: 0,
85                target_block_offset: self.block_range.start,
86                length: self.block_count(),
87            }];
88            if let Err(err) =
89                gpt.block_proxy.open_session_with_offset_map(session, None, Some(&mappings[..]))
90            {
91                // Client errors normally come back on `session` but that was already consumed.  The
92                // client will get a PEER_CLOSED without an epitaph.
93                log::warn!(err:?; "Failed to open passthrough session");
94            }
95        } else {
96            if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
97                log::warn!(err:?; "Failed to send session epitaph");
98            }
99        }
100    }
101
102    pub async fn get_info(&self) -> Result<block_server::DeviceInfo, zx::Status> {
103        if let Some(gpt) = self.gpt.upgrade() {
104            gpt.inner
105                .lock()
106                .await
107                .gpt
108                .partitions()
109                .get(&self.index)
110                .map(|info| convert_partition_info(info, self.block_client.block_flags()))
111                .ok_or(zx::Status::BAD_STATE)
112        } else {
113            Err(zx::Status::BAD_STATE)
114        }
115    }
116
117    pub async fn read(
118        &self,
119        device_block_offset: u64,
120        block_count: u32,
121        vmo_id: &VmoId,
122        vmo_offset: u64, // *bytes* not blocks
123        trace_flow_id: Option<NonZero<u64>>,
124    ) -> Result<(), zx::Status> {
125        let dev_offset = self
126            .absolute_offset(device_block_offset, block_count)
127            .map(|offset| offset * self.block_size() as u64)?;
128        let buffer = MutableBufferSlice::new_with_vmo_id(
129            vmo_id,
130            vmo_offset,
131            (block_count * self.block_size()) as u64,
132        );
133        self.block_client.read_at_traced(buffer, dev_offset, trace_id(trace_flow_id)).await
134    }
135
136    pub async fn write(
137        &self,
138        device_block_offset: u64,
139        block_count: u32,
140        vmo_id: &VmoId,
141        vmo_offset: u64, // *bytes* not blocks
142        opts: WriteOptions,
143        trace_flow_id: Option<NonZero<u64>>,
144    ) -> Result<(), zx::Status> {
145        let dev_offset = self
146            .absolute_offset(device_block_offset, block_count)
147            .map(|offset| offset * self.block_size() as u64)?;
148        let buffer = BufferSlice::new_with_vmo_id(
149            vmo_id,
150            vmo_offset,
151            (block_count * self.block_size()) as u64,
152        );
153        self.block_client
154            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
155            .await
156    }
157
158    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
159        self.block_client.flush_traced(trace_id(trace_flow_id)).await
160    }
161
162    pub async fn trim(
163        &self,
164        device_block_offset: u64,
165        block_count: u32,
166        trace_flow_id: Option<NonZero<u64>>,
167    ) -> Result<(), zx::Status> {
168        let dev_offset = self
169            .absolute_offset(device_block_offset, block_count)
170            .map(|offset| offset * self.block_size() as u64)?;
171        let len = block_count as u64 * self.block_size() as u64;
172        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
173    }
174
175    // Converts a relative range specified by [offset, offset+len) into an absolute offset in the
176    // GPT device, performing bounds checking within the partition.  Returns ZX_ERR_OUT_OF_RANGE for
177    // an invalid offset/len.
178    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
179        offset = offset.checked_add(self.block_range.start).ok_or(zx::Status::OUT_OF_RANGE)?;
180        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
181        if end > self.block_range.end {
182            Err(zx::Status::OUT_OF_RANGE)
183        } else {
184            Ok(offset)
185        }
186    }
187}
188
189fn convert_partition_info(
190    info: &gpt::PartitionInfo,
191    device_flags: fblock::Flag,
192) -> block_server::DeviceInfo {
193    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
194        device_flags,
195        block_range: Some(info.start_block..info.start_block + info.num_blocks),
196        type_guid: info.type_guid.to_bytes(),
197        instance_guid: info.instance_guid.to_bytes(),
198        name: info.label.clone(),
199        flags: info.flags,
200    })
201}
202
/// State for an in-flight metadata transaction created by `GptManager::create_transaction`.
struct PendingTransaction {
    transaction: gpt::Transaction,
    // Koid of the client end of the transaction's event pair; incoming handles are matched
    // against this to identify the transaction.
    client_koid: zx::Koid,
    // A list of indexes for partitions which were added in the transaction.  When committing, all
    // newly created partitions are published.
    added_partitions: Vec<u32>,
    // A task which waits for the client end to be closed and clears the pending transaction.
    _signal_task: fasync::Task<()>,
}
212
/// Lock-protected state of a `GptManager`.
struct Inner {
    // The parsed GPT and its connection to the underlying device.
    gpt: gpt::Gpt,
    // Block servers for each bound partition, keyed by GPT index.
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Exposes all partitions for discovery by other components.  Should be kept in sync with
    // `partitions`.
    partitions_dir: PartitionsDirectory,
    // At most one metadata transaction may be pending at a time.
    pending_transaction: Option<PendingTransaction>,
}
221
222impl Inner {
223    /// Ensures that `transaction` matches our pending transaction.
224    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
225        if let Some(pending) = self.pending_transaction.as_ref() {
226            if transaction.get_koid()? == pending.client_koid {
227                Ok(())
228            } else {
229                Err(zx::Status::BAD_HANDLE)
230            }
231        } else {
232            Err(zx::Status::BAD_STATE)
233        }
234    }
235
236    async fn bind_partition(
237        &mut self,
238        parent: &Arc<GptManager>,
239        index: u32,
240        info: gpt::PartitionInfo,
241    ) -> Result<(), Error> {
242        log::info!("GPT part {index}: {info:?}");
243        let partition = PartitionBackend::new(GptPartition::new(
244            parent,
245            self.gpt.client().clone(),
246            index,
247            info.start_block
248                ..info
249                    .start_block
250                    .checked_add(info.num_blocks)
251                    .ok_or_else(|| anyhow!("Overflow in partition range"))?,
252        ));
253        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
254        self.partitions_dir.add_entry(
255            &partition_directory_entry_name(index),
256            Arc::downgrade(&block_server),
257            Arc::downgrade(parent),
258            index as usize,
259        );
260        self.partitions.insert(index, block_server);
261        Ok(())
262    }
263
264    async fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
265        self.partitions.clear();
266        self.partitions_dir.clear();
267        for (index, info) in self.gpt.partitions().clone() {
268            self.bind_partition(parent, index, info).await?;
269        }
270        Ok(())
271    }
272
273    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
274        let pending = self.pending_transaction.as_mut().unwrap();
275        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
276        pending.added_partitions.push(idx as u32);
277        Ok(idx)
278    }
279}
280
/// Runs a GPT device.
pub struct GptManager {
    // Connection to the underlying whole-disk block device.
    block_proxy: fblock::BlockProxy,
    // Geometry of the underlying device, cached at bind time.
    block_size: u32,
    block_count: u64,
    // All mutable state (GPT metadata, bound partitions, pending transaction).
    inner: Mutex<Inner>,
    // Set by `shutdown`; checked in `Drop` to catch a missing shutdown call.
    shutdown: AtomicBool,
}
289
impl std::fmt::Debug for GptManager {
    // Manual impl: only the device geometry is reported; the remaining fields are omitted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}
298
299impl GptManager {
300    pub async fn new(
301        block_proxy: fblock::BlockProxy,
302        partitions_dir: Arc<vfs::directory::immutable::Simple>,
303    ) -> Result<Arc<Self>, Error> {
304        log::info!("Binding to GPT");
305        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
306        let block_size = client.block_size();
307        let block_count = client.block_count();
308        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;
309
310        let this = Arc::new(Self {
311            block_proxy,
312            block_size,
313            block_count,
314            inner: Mutex::new(Inner {
315                gpt,
316                partitions: BTreeMap::new(),
317                partitions_dir: PartitionsDirectory::new(partitions_dir),
318                pending_transaction: None,
319            }),
320            shutdown: AtomicBool::new(false),
321        });
322        log::info!("Bind to GPT OK, binding partitions");
323        this.inner.lock().await.bind_all_partitions(&this).await?;
324        log::info!("Starting all partitions OK!");
325        Ok(this)
326    }
327
328    pub fn block_size(&self) -> u32 {
329        self.block_size
330    }
331
332    pub fn block_count(&self) -> u64 {
333        self.block_count
334    }
335
336    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
337        let mut inner = self.inner.lock().await;
338        if inner.pending_transaction.is_some() {
339            return Err(zx::Status::ALREADY_EXISTS);
340        }
341        let transaction = inner.gpt.create_transaction().unwrap();
342        let (client_end, server_end) = zx::EventPair::create();
343        let client_koid = client_end.get_koid()?;
344        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
345        let this = self.clone();
346        let task = fasync::Task::spawn(async move {
347            let _ = signal_waiter.await;
348            let mut inner = this.inner.lock().await;
349            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
350                inner.pending_transaction = None;
351            }
352        });
353        inner.pending_transaction = Some(PendingTransaction {
354            transaction,
355            client_koid,
356            added_partitions: vec![],
357            _signal_task: task,
358        });
359        Ok(client_end)
360    }
361
362    pub async fn commit_transaction(
363        self: &Arc<Self>,
364        transaction: zx::EventPair,
365    ) -> Result<(), zx::Status> {
366        let mut inner = self.inner.lock().await;
367        inner.ensure_transaction_matches(&transaction)?;
368        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
369        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
370            log::error!(err:?; "Failed to commit transaction");
371            return Err(zx::Status::IO);
372        }
373        for idx in pending.added_partitions {
374            let info = inner.gpt.partitions().get(&idx).ok_or(zx::Status::BAD_STATE)?.clone();
375            inner.bind_partition(self, idx, info).await.map_err(|err| {
376                log::error!(err:?; "Failed to bind partition");
377                zx::Status::BAD_STATE
378            })?;
379        }
380        Ok(())
381    }
382
383    pub async fn add_partition(
384        &self,
385        request: fpartitions::PartitionsManagerAddPartitionRequest,
386    ) -> Result<(), zx::Status> {
387        let mut inner = self.inner.lock().await;
388        inner.ensure_transaction_matches(
389            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
390        )?;
391        let info = gpt::PartitionInfo {
392            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
393            type_guid: request
394                .type_guid
395                .map(|value| gpt::Guid::from_bytes(value.value))
396                .ok_or(zx::Status::INVALID_ARGS)?,
397            instance_guid: request
398                .instance_guid
399                .map(|value| gpt::Guid::from_bytes(value.value))
400                .unwrap_or_else(|| gpt::Guid::generate()),
401            start_block: 0,
402            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
403            flags: request.flags.unwrap_or_default(),
404        };
405        let idx = inner.add_partition(info)?;
406        let partition =
407            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
408        log::info!(
409            "Allocated partition {:?} at {:?}",
410            partition.label,
411            partition.start_block..partition.start_block + partition.num_blocks
412        );
413        Ok(())
414    }
415
416    pub async fn handle_partitions_requests(
417        &self,
418        gpt_index: usize,
419        mut requests: fpartitions::PartitionRequestStream,
420    ) -> Result<(), zx::Status> {
421        while let Some(request) = requests.try_next().await.unwrap() {
422            match request {
423                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
424                    responder
425                        .send(
426                            self.update_partition_metadata(gpt_index, payload)
427                                .await
428                                .map_err(|status| status.into_raw()),
429                        )
430                        .unwrap_or_else(
431                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
432                        );
433                }
434            }
435        }
436        Ok(())
437    }
438
439    async fn update_partition_metadata(
440        &self,
441        gpt_index: usize,
442        request: fpartitions::PartitionUpdateMetadataRequest,
443    ) -> Result<(), zx::Status> {
444        let mut inner = self.inner.lock().await;
445        inner.ensure_transaction_matches(
446            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
447        )?;
448
449        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
450        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
451        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
452            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
453        }
454        if let Some(flags) = request.flags.as_ref() {
455            entry.flags = *flags;
456        }
457        Ok(())
458    }
459
460    pub async fn reset_partition_table(
461        self: &Arc<Self>,
462        partitions: Vec<gpt::PartitionInfo>,
463    ) -> Result<(), zx::Status> {
464        let mut inner = self.inner.lock().await;
465        if inner.pending_transaction.is_some() {
466            return Err(zx::Status::BAD_STATE);
467        }
468
469        log::info!("Resetting gpt.  Expect data loss!!!");
470        let mut transaction = inner.gpt.create_transaction().unwrap();
471        transaction.partitions = partitions;
472        inner.gpt.commit_transaction(transaction).await?;
473
474        log::info!("Rebinding partitions...");
475        if let Err(err) = inner.bind_all_partitions(&self).await {
476            log::error!(err:?; "Failed to rebind partitions");
477            return Err(zx::Status::BAD_STATE);
478        }
479        log::info!("Rebinding partitions OK!");
480        Ok(())
481    }
482
483    pub async fn shutdown(self: Arc<Self>) {
484        log::info!("Shutting down gpt");
485        let mut inner = self.inner.lock().await;
486        inner.partitions_dir.clear();
487        inner.partitions.clear();
488        self.shutdown.store(true, Ordering::Relaxed);
489        log::info!("Shutting down gpt OK");
490    }
491}
492
impl Drop for GptManager {
    fn drop(&mut self) {
        // `shutdown()` must be called before the manager is dropped so partitions and directory
        // entries are torn down; assert makes a missed shutdown fail loudly.
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}
498
499#[cfg(test)]
500mod tests {
501    use super::GptManager;
502    use block_client::{BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient};
503    use block_server::WriteOptions;
504    use fake_block_server::{FakeServer, FakeServerOptions};
505    use fidl::HandleBased as _;
506    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
507    use gpt::{Gpt, Guid, PartitionInfo};
508    use std::sync::atomic::{AtomicBool, Ordering};
509    use std::sync::Arc;
510    use vfs::directory::entry_container::Directory as _;
511    use vfs::ObjectRequest;
512    use {
513        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
514        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
515        fuchsia_async as fasync,
516    };
517
    /// Creates a fake block device with the given geometry, formats a GPT containing `partitions`
    /// on it, and returns it along with an empty directory for publishing partitions.
    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            FakeServerOptions { block_count: Some(block_count), block_size, ..Default::default() },
            partitions,
        )
        .await
    }
529
    /// Like `setup`, but gives the caller full control over the fake server's options.
    async fn setup_with_options(
        opts: FakeServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(FakeServer::from(opts));
        {
            // Serve the fake device on a temporary connection and write a GPT to it; the
            // connection is torn down when this scope ends.
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }
549
    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        // A device with no GPT written to it must fail to bind.
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(FakeServer::from_vmo(512, vmo));

        GptManager::new(server.block_proxy(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }
559
    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        // A valid GPT with zero partitions should bind successfully.
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }
569
    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        // Exactly one entry ("part-000") should be published, and nothing more.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
598
    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        // Exactly two entries should be published, and nothing more.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
640
    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // One 2-block partition starting at device block 4.
        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        // Connect to the published partition's volume protocol.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // The client should see the partition's geometry, not the whole device's.
        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        // I/O within the partition succeeds; anything past its end is rejected.
        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Ensure writes persisted to the partition.  Partition block 0 is device block 4, i.e.
        // byte offset 2048.
        let mut buf = vec![0u8; 512];
        let client = RemoteBlockClient::new(block_device.block_proxy()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }
703
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Corrupt block 1 (byte offset 512) — the primary GPT header's location.
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        // Loading should still succeed (via the backup copy) and expose both partitions.
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
751
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Corrupt block 2 (byte offset 1024) — where the primary partition table begins.
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        // Loading should still succeed (via the backup copy) and expose both partitions.
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
799
    #[fuchsia::test]
    async fn force_access_passed_through() {
        // Verifies that WriteOptions::FORCE_ACCESS set by a client of a GPT partition is passed
        // through to writes issued against the underlying block device.
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        // Observes every write reaching the fake block server.  The shared AtomicBool records
        // whether the test currently expects FORCE_ACCESS to be set.
        struct Observer(Arc<AtomicBool>);

        impl fake_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> fake_block_server::WriteAction {
                // The presence of FORCE_ACCESS must match the test's current expectation.
                assert_eq!(
                    opts.contains(WriteOptions::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                fake_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.block_proxy(), partitions_dir.clone()).await.unwrap();

        // Connect a block client to the partition's volume service.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // A plain write must NOT carry FORCE_ACCESS (the observer still expects `false`).
        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        // From here on, writes are expected to carry FORCE_ACCESS.
        expect_force_access.store(true, Ordering::Relaxed);

        client
            .write_at_with_opts(BufferSlice::Memory(&buffer), 0, WriteOptions::FORCE_ACCESS)
            .await
            .unwrap();

        manager.shutdown().await;
    }
867
868    #[fuchsia::test]
869    async fn commit_transaction() {
870        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
871        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
872        const PART_1_NAME: &str = "part";
873        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
874        const PART_2_NAME: &str = "part2";
875
876        let (block_device, partitions_dir) = setup(
877            512,
878            16,
879            vec![
880                PartitionInfo {
881                    label: PART_1_NAME.to_string(),
882                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
883                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
884                    start_block: 4,
885                    num_blocks: 1,
886                    flags: 0,
887                },
888                PartitionInfo {
889                    label: PART_2_NAME.to_string(),
890                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
891                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
892                    start_block: 5,
893                    num_blocks: 1,
894                    flags: 0,
895                },
896            ],
897        )
898        .await;
899        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
900            .await
901            .expect("load should succeed");
902
903        let (part_0_dir, server_end_0) = fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
904        let (part_1_dir, server_end_1) = fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
905        let flags =
906            fio::Flags::PERM_CONNECT | fio::Flags::PERM_TRAVERSE | fio::Flags::PERM_ENUMERATE;
907        let options = fio::Options::default();
908        let scope = vfs::execution_scope::ExecutionScope::new();
909        partitions_dir
910            .clone()
911            .open3(
912                scope.clone(),
913                vfs::path::Path::validate_and_split("part-000").unwrap(),
914                flags.clone(),
915                &mut ObjectRequest::new(flags, &options, server_end_0.into_channel().into()),
916            )
917            .unwrap();
918        partitions_dir
919            .clone()
920            .open3(
921                scope.clone(),
922                vfs::path::Path::validate_and_split("part-001").unwrap(),
923                flags.clone(),
924                &mut ObjectRequest::new(flags, &options, server_end_1.into_channel().into()),
925            )
926            .unwrap();
927        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
928            &part_0_dir,
929            "partition",
930        )
931        .expect("Failed to open Partition service");
932        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
933            &part_1_dir,
934            "partition",
935        )
936        .expect("Failed to open Partition service");
937
938        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
939        part_0_proxy
940            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
941                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
942                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
943                    value: [0xffu8; 16],
944                }),
945                ..Default::default()
946            })
947            .await
948            .expect("FIDL error")
949            .expect("Failed to update_metadata");
950        part_1_proxy
951            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
952                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
953                flags: Some(1234),
954                ..Default::default()
955            })
956            .await
957            .expect("FIDL error")
958            .expect("Failed to update_metadata");
959        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");
960
961        // Ensure the changes have propagated to the correct partitions.
962        let part_0_block =
963            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
964                .expect("Failed to open Volume service");
965        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
966        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
967        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
968        let part_1_block =
969            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
970                .expect("Failed to open Volume service");
971        let metadata =
972            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
973        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
974        assert_eq!(metadata.flags, Some(1234));
975
976        runner.shutdown().await;
977    }
978
    #[fuchsia::test]
    async fn reset_partition_tables() {
        // The test will reset the tables from ["part", "part2"] to
        // ["part3", <empty>, "part4", <125 empty entries>].
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        // 1 MiB device with 512-byte blocks, formatted with two single-block partitions.
        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        // An all-zero entry marks an unused slot in the new table.
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        // New 128-entry table: slot 0 -> "part3", slot 1 left empty, slot 2 -> "part4".
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        // Directory entries mirror table slots: occupied slots 0 and 2 exist, empty slot 1
        // must not.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        // Spot-check that slot 0 now reports the new label.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }
1061
1062    #[fuchsia::test]
1063    async fn reset_partition_tables_fails_if_too_many_partitions() {
1064        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
1065        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1066            .await
1067            .expect("load should succeed");
1068        let nil_entry = PartitionInfo {
1069            label: "".to_string(),
1070            type_guid: Guid::from_bytes([0u8; 16]),
1071            instance_guid: Guid::from_bytes([0u8; 16]),
1072            start_block: 0,
1073            num_blocks: 0,
1074            flags: 0,
1075        };
1076        let new_partitions = vec![nil_entry; 128];
1077        runner
1078            .reset_partition_table(new_partitions)
1079            .await
1080            .expect_err("reset_partition_table should fail");
1081
1082        runner.shutdown().await;
1083    }
1084
1085    #[fuchsia::test]
1086    async fn reset_partition_tables_fails_if_too_large_partitions() {
1087        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1088        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1089            .await
1090            .expect("load should succeed");
1091        let new_partitions = vec![
1092            PartitionInfo {
1093                label: "a".to_string(),
1094                type_guid: Guid::from_bytes([1u8; 16]),
1095                instance_guid: Guid::from_bytes([1u8; 16]),
1096                start_block: 4,
1097                num_blocks: 2,
1098                flags: 0,
1099            },
1100            PartitionInfo {
1101                label: "b".to_string(),
1102                type_guid: Guid::from_bytes([2u8; 16]),
1103                instance_guid: Guid::from_bytes([2u8; 16]),
1104                start_block: 6,
1105                num_blocks: 200,
1106                flags: 0,
1107            },
1108        ];
1109        runner
1110            .reset_partition_table(new_partitions)
1111            .await
1112            .expect_err("reset_partition_table should fail");
1113
1114        runner.shutdown().await;
1115    }
1116
1117    #[fuchsia::test]
1118    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
1119        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1120        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1121            .await
1122            .expect("load should succeed");
1123        let new_partitions = vec![PartitionInfo {
1124            label: "a".to_string(),
1125            type_guid: Guid::from_bytes([1u8; 16]),
1126            instance_guid: Guid::from_bytes([1u8; 16]),
1127            start_block: 1,
1128            num_blocks: 2,
1129            flags: 0,
1130        }];
1131        runner
1132            .reset_partition_table(new_partitions)
1133            .await
1134            .expect_err("reset_partition_table should fail");
1135
1136        runner.shutdown().await;
1137    }
1138
1139    #[fuchsia::test]
1140    async fn reset_partition_tables_fails_if_partitions_overlap() {
1141        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1142        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1143            .await
1144            .expect("load should succeed");
1145        let new_partitions = vec![
1146            PartitionInfo {
1147                label: "a".to_string(),
1148                type_guid: Guid::from_bytes([1u8; 16]),
1149                instance_guid: Guid::from_bytes([1u8; 16]),
1150                start_block: 32,
1151                num_blocks: 2,
1152                flags: 0,
1153            },
1154            PartitionInfo {
1155                label: "b".to_string(),
1156                type_guid: Guid::from_bytes([2u8; 16]),
1157                instance_guid: Guid::from_bytes([2u8; 16]),
1158                start_block: 33,
1159                num_blocks: 1,
1160                flags: 0,
1161            },
1162        ];
1163        runner
1164            .reset_partition_table(new_partitions)
1165            .await
1166            .expect_err("reset_partition_table should fail");
1167
1168        runner.shutdown().await;
1169    }
1170
1171    #[fuchsia::test]
1172    async fn add_partition() {
1173        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
1174        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
1175            .await
1176            .expect("load should succeed");
1177
1178        let transaction = runner.create_transaction().await.expect("Create transaction failed");
1179        let request = fpartitions::PartitionsManagerAddPartitionRequest {
1180            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1181            name: Some("a".to_string()),
1182            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
1183            num_blocks: Some(2),
1184            ..Default::default()
1185        };
1186        runner.add_partition(request).await.expect("add_partition failed");
1187        runner.commit_transaction(transaction).await.expect("add_partition failed");
1188
1189        let proxy = vfs::serve_directory(
1190            partitions_dir.clone(),
1191            vfs::path::Path::validate_and_split("part-000").unwrap(),
1192            fio::PERM_READABLE,
1193        );
1194        let block =
1195            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
1196                .expect("Failed to open block service");
1197        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");
1198
1199        assert_eq!(client.block_count(), 2);
1200        assert_eq!(client.block_size(), 512);
1201
1202        runner.shutdown().await;
1203    }
1204
1205    #[fuchsia::test]
1206    async fn partition_info() {
1207        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1208        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1209        const PART_NAME: &str = "part";
1210
1211        let (block_device, partitions_dir) = setup_with_options(
1212            FakeServerOptions {
1213                block_count: Some(8),
1214                block_size: 512,
1215                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
1216                ..Default::default()
1217            },
1218            vec![PartitionInfo {
1219                label: PART_NAME.to_string(),
1220                type_guid: Guid::from_bytes(PART_TYPE_GUID),
1221                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
1222                start_block: 4,
1223                num_blocks: 1,
1224                flags: 0xabcd,
1225            }],
1226        )
1227        .await;
1228
1229        let partitions_dir_clone = partitions_dir.clone();
1230        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
1231            .await
1232            .expect("load should succeed");
1233
1234        let part_dir = vfs::serve_directory(
1235            partitions_dir.clone(),
1236            vfs::path::Path::validate_and_split("part-000").unwrap(),
1237            fio::PERM_READABLE,
1238        );
1239        let part_block =
1240            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
1241                .expect("Failed to open Volume service");
1242        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
1243        assert_eq!(info.block_count, 1);
1244        assert_eq!(info.block_size, 512);
1245        assert_eq!(info.flags, fblock::Flag::READONLY | fblock::Flag::REMOVABLE);
1246
1247        let metadata =
1248            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
1249        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
1250        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
1251        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
1252        assert_eq!(metadata.start_block_offset, Some(4));
1253        assert_eq!(metadata.num_blocks, Some(1));
1254        assert_eq!(metadata.flags, Some(0xabcd));
1255
1256        runner.shutdown().await;
1257    }
1258
    #[fuchsia::test]
    async fn nested_gpt() {
        // Verifies that a GPT nested inside a partition of an outer GPT works, and that block
        // offsets compose correctly across the two layers.
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // Back the fake device with a VMO we keep a handle to, so the final state can be
        // inspected directly.  The REFERENCE child shares pages with `vmo`.
        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            FakeServerOptions {
                vmo: Some(vmo_clone),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.block_proxy(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        // Format an inner GPT inside the outer partition, with one 1-block partition at inner
        // block 5.
        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        // Run a second GptManager against the outer partition's block protocol.
        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        // Writes within the inner partition succeed; writes past its 1-block end are rejected.
        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // Check that the write targeted the correct block (4 + 5 = 9)
        let data = vmo.read_to_vec(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }
1348
    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        // Verifies that a session's offset map cannot be used to reach blocks beyond the
        // partition's bounds: mapped ranges are clamped to the partition.
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // 16-block device with a single 2-block partition at blocks [4..6).
        let (block_device, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(16),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        // Open a session that shifts all block offsets by one.  The apparent range of the partition
        // should be [0..512) bytes (which corresponds to [512..1024) in the partition), because
        // bytes [512..1024) would be mapped to [1024..1536) which exceeds the partition's limit.
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                None,
                Some(&[fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                }]),
            )
            .expect("FIDL error");

        // Within the clamped window [0..512), reads and writes succeed.
        let client = Arc::new(RemoteBlockClient::from_session(info, session).await.unwrap());
        let mut buffer = vec![0xaa; 512];
        client.flush().await.expect("Flush should succeed");
        client
            .read_at(MutableBufferSlice::Memory(&mut buffer), 0)
            .await
            .expect("Read should succeed");
        client.write_at(BufferSlice::Memory(&buffer), 0).await.expect("Write should succeed");
        // Beyond the window, both reads and writes must be rejected even though the unmapped
        // partition is 2 blocks long.
        client
            .read_at(MutableBufferSlice::Memory(&mut buffer), 512)
            .await
            .expect_err("Read past end should fail");
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");

        runner.shutdown().await;
    }
1423}