gpt_component/gpt.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::config::Config;
use crate::partition::PartitionBackend;
use crate::partitions_directory::PartitionsDirectory;
use anyhow::{Context as _, Error, anyhow};
use block_client::{
    BlockClient as _, BufferSlice, MutableBufferSlice, ReadOptions, RemoteBlockClient, VmoId,
    WriteOptions,
};
use block_server::BlockServer;
use block_server::async_interface::SessionManager;

use fidl::endpoints::ServerEnd;
use fuchsia_sync::Mutex;
use futures::stream::TryStreamExt as _;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use zx::AsHandleRef as _;
use {
    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
    fuchsia_async as fasync,
};

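/// Formats the directory entry name for the partition at `index`, e.g. "part-000".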
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{:03}", index)
}

/// A single partition in a GPT device.
pub struct GptPartition {
    gpt: Weak<GptManager>,
    info: Mutex<gpt::PartitionInfo>,
    block_client: Arc<RemoteBlockClient>,
}

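/// Converts an optional trace flow id into the raw value used by the traced block client calls,
/// with 0 meaning "no id".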
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    trace_flow_id.map(|v| v.get()).unwrap_or_default()
}

impl GptPartition {
    pub fn new(
        gpt: &Arc<GptManager>,
        block_client: Arc<RemoteBlockClient>,
        info: gpt::PartitionInfo,
    ) -> Arc<Self> {
        Arc::new(Self { gpt: Arc::downgrade(gpt), info: Mutex::new(info), block_client })
    }

    pub async fn terminate(&self) {
        if let Err(error) = self.block_client.close().await {
            log::warn!(error:?; "Failed to close block client");
        }
    }

    /// Replaces the partition info, returning its old value.
    pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
        std::mem::replace(&mut *self.info.lock(), info)
    }

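    /// Returns the block size of the underlying device, in bytes.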
    pub fn block_size(&self) -> u32 {
        self.block_client.block_size()
    }

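    /// Returns the number of blocks in this partition (not the whole device).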
    pub fn block_count(&self) -> u64 {
        self.info.lock().num_blocks
    }

    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        self.block_client.attach_vmo(vmo).await
    }

    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
        self.block_client.detach_vmo(vmoid).await
    }

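    /// Opens a session against the underlying block device with an offset map that remaps the
    /// partition's block range, so I/O on the session goes directly to the device.  If the
    /// `GptManager` has already been dropped, the session is closed with a `BAD_STATE` epitaph.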
    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
        if let Some(gpt) = self.gpt.upgrade() {
            let mapping = {
                let info = self.info.lock();
                fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: info.start_block,
                    length: info.num_blocks,
                }
            };
            if let Err(err) = gpt.block_proxy.open_session_with_offset_map(session, &mapping) {
                // Client errors normally come back on `session` but that was already consumed.
                // The client will get a PEER_CLOSED without an epitaph.
                log::warn!(err:?; "Failed to open passthrough session");
            }
        } else if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
            log::warn!(err:?; "Failed to send session epitaph");
        }
    }

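    /// Returns the device info to report for this partition, combining the partition's extent
    /// and GUIDs with the flags and transfer limits of the underlying device.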
    pub fn get_info(&self) -> block_server::DeviceInfo {
        convert_partition_info(
            &*self.info.lock(),
            self.block_client.block_flags(),
            self.block_client.max_transfer_blocks(),
        )
    }

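    /// Reads `block_count` blocks from this partition, starting at `device_block_offset`
    /// (relative to the start of the partition), into the VMO registered as `vmo_id`.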
    pub async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: ReadOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = MutableBufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .read_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

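    /// Issues a write barrier to the underlying block client.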
    pub fn barrier(&self) {
        self.block_client.barrier();
    }

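    /// Writes `block_count` blocks from the VMO registered as `vmo_id` to this partition,
    /// starting at `device_block_offset` (relative to the start of the partition).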
    pub async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64, // *bytes* not blocks
        opts: WriteOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = BufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        self.block_client.flush_traced(trace_id(trace_flow_id)).await
    }

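    /// Trims `block_count` blocks starting at `device_block_offset` (relative to the start of
    /// the partition).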
    pub async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let len = block_count as u64 * self.block_size() as u64;
        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
    }

    // Converts a relative range specified by [offset, offset+len) into an absolute offset in the
    // GPT device, performing bounds checking within the partition.  Returns ZX_ERR_OUT_OF_RANGE for
    // an invalid offset/len.
    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
        let info = self.info.lock();
        offset = offset.checked_add(info.start_block).ok_or(zx::Status::OUT_OF_RANGE)?;
        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
        if end > info.start_block + info.num_blocks {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(offset)
        }
    }
}

fn convert_partition_info(
    info: &gpt::PartitionInfo,
    device_flags: fblock::Flag,
    max_transfer_blocks: Option<NonZero<u32>>,
) -> block_server::DeviceInfo {
    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
        device_flags,
        max_transfer_blocks,
        block_range: Some(info.start_block..info.start_block + info.num_blocks),
        type_guid: info.type_guid.to_bytes(),
        instance_guid: info.instance_guid.to_bytes(),
        name: info.label.clone(),
        flags: info.flags,
    })
}

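/// Returns true if partition `a` ends exactly where partition `b` begins, i.e. the two are
/// contiguous on the device and can be presented as a single merged partition.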
fn can_merge(a: &gpt::PartitionInfo, b: &gpt::PartitionInfo) -> bool {
    a.start_block + a.num_blocks == b.start_block
}

struct PendingTransaction {
    transaction: gpt::Transaction,
    client_koid: zx::Koid,
    // A list of indexes for partitions which were added in the transaction.  When committing, all
    // newly created partitions are published.
    added_partitions: Vec<u32>,
    // A task which waits for the client end to be closed and clears the pending transaction.
    _signal_task: fasync::Task<()>,
}

struct Inner {
    gpt: gpt::Gpt,
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // We track these separately so that we do not update them during transaction commit.
    overlay_partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Exposes all partitions for discovery by other components.  Should be kept in sync with
    // `partitions`.
    partitions_dir: PartitionsDirectory,
    pending_transaction: Option<PendingTransaction>,
}

impl Inner {
    /// Ensures that `transaction` matches our pending transaction.
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.get_koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

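    /// Creates a block server for the partition at `index` and publishes it in the partitions
    /// directory.  If `overlay_indexes` is non-empty, the entry is published as an overlay
    /// partition covering those GPT entries instead of a regular partition.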
    fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
        overlay_indexes: Vec<usize>,
    ) -> Result<(), Error> {
        log::trace!(
            "GPT part {index}{}: {info:?}",
            if !overlay_indexes.is_empty() { " (overlay)" } else { "" }
        );
        info.start_block
            .checked_add(info.num_blocks)
            .ok_or_else(|| anyhow!("Overflow in partition end"))?;
        let partition =
            PartitionBackend::new(GptPartition::new(parent, self.gpt.client().clone(), info));
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        if !overlay_indexes.is_empty() {
            self.partitions_dir.add_overlay(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                overlay_indexes,
            );
            self.overlay_partitions.insert(index, block_server);
        } else {
            self.partitions_dir.add_partition(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                index as usize,
            );
            self.partitions.insert(index, block_server);
        }
        Ok(())
    }

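    /// Binds the contiguous `super` and `userdata` partitions as a single overlay partition
    /// which starts at `super`'s first block and spans both.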
    fn bind_super_and_userdata_partition(
        &mut self,
        parent: &Arc<GptManager>,
        super_partition: (u32, gpt::PartitionInfo),
        userdata_partition: (u32, gpt::PartitionInfo),
    ) -> Result<(), Error> {
        let info = gpt::PartitionInfo {
            // TODO(https://fxbug.dev/443980711): This should come from configuration.
            label: "super_and_userdata".to_string(),
            type_guid: super_partition.1.type_guid.clone(),
            instance_guid: super_partition.1.instance_guid.clone(),
            start_block: super_partition.1.start_block,
            num_blocks: super_partition.1.num_blocks + userdata_partition.1.num_blocks,
            flags: super_partition.1.flags,
        };
        log::trace!(
            "GPT merged parts {:?} + {:?} -> {info:?}",
            super_partition.1,
            userdata_partition.1
        );
        self.bind_partition(
            parent,
            super_partition.0,
            info,
            vec![super_partition.0 as usize, userdata_partition.0 as usize],
        )
    }

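    /// Rebinds block servers for every partition in the GPT, replacing any existing bindings.
    /// If configured, the first `super` and `userdata` partitions found are merged into a single
    /// overlay partition when they are contiguous.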
    fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.overlay_partitions.clear();
        self.partitions_dir.clear();

        let mut partitions = self.gpt.partitions().clone();
        if parent.config.merge_super_and_userdata {
            // Attempt to merge the first `super` and `userdata` we find.  The rest will be treated
            // as regular partitions.
            let super_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "super")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            let userdata_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "userdata")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            if super_part.is_some() && userdata_part.is_some() {
                let super_part = super_part.unwrap();
                let userdata_part = userdata_part.unwrap();
                if can_merge(&super_part.1, &userdata_part.1) {
                    self.bind_super_and_userdata_partition(parent, super_part, userdata_part)?;
                } else {
                    log::warn!("super/userdata cannot be merged");
                    self.bind_partition(parent, super_part.0, super_part.1, vec![])?;
                    self.bind_partition(parent, userdata_part.0, userdata_part.1, vec![])?;
                }
            } else if super_part.is_some() || userdata_part.is_some() {
                log::warn!("Only one of super/userdata found; not merging");
                let (index, info) = super_part.or(userdata_part).unwrap();
                self.bind_partition(parent, index, info, vec![])?;
            }
        }
        for (index, info) in partitions {
            self.bind_partition(parent, index, info, vec![])?;
        }
        Ok(())
    }

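    /// Adds a partition to the pending transaction, recording its index so the partition can be
    /// published when the transaction commits.  Panics if there is no pending transaction.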
    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}

/// Runs a GPT device.
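///
/// A minimal usage sketch (assumes `block_proxy` is already connected to a block device
/// containing a valid GPT; see the tests below for full setup):
///
/// ```ignore
/// let partitions_dir = vfs::directory::immutable::simple();
/// let manager = GptManager::new(block_proxy, partitions_dir.clone()).await?;
/// // Serve `partitions_dir` to clients, then tear down cleanly; `GptManager` asserts at
/// // drop time that `shutdown` was called.
/// manager.shutdown().await;
/// ```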
pub struct GptManager {
    config: Config,
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: futures::lock::Mutex<Inner>,
    shutdown: AtomicBool,
}

impl std::fmt::Debug for GptManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}

impl GptManager {
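    /// Loads the GPT from `block_proxy` and publishes an entry for each partition under
    /// `partitions_dir`, using the default configuration.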
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        Self::new_with_config(block_proxy, partitions_dir, Config::default()).await
    }

    pub async fn new_with_config(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
        config: Config,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            config,
            block_proxy,
            block_size,
            block_count,
            inner: futures::lock::Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                overlay_partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        this.inner.lock().await.bind_all_partitions(&this)?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

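    /// Begins a transaction to modify the partition table.  The returned event pair is the
    /// client's handle to the transaction; when all clones of it are closed, the transaction is
    /// abandoned.  Only one transaction may be active at a time.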
    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.get_koid()?;
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

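    /// Commits the pending transaction identified by `transaction` to disk, updating the
    /// in-memory partition info and publishing any partitions added during the transaction.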
    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction)?;
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        let partitions = pending.transaction.partitions.clone();
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::warn!(err:?; "Failed to commit transaction");
            return Err(zx::Status::IO);
        }
        // Everything after this point should be infallible.
        for (info, idx) in partitions
            .iter()
            .zip(0u32..)
            .filter(|(info, idx)| !info.is_nil() && !pending.added_partitions.contains(idx))
        {
            // Some physical partitions are not tracked in `inner.partitions` (e.g. when we use an
            // overlay partition to combine two physical partitions).  In this case, we still need
            // to propagate the info in the underlying transaction, but there's no need to update
            // the in-memory info.
            // Note that overlay partitions can't be changed by transactions anyway, so the info
            // we propagate should be exactly what it was when we created the transaction.
            if let Some(part) = inner.partitions.get(&idx) {
                part.session_manager().interface().update_info(info.clone());
            }
        }
        for idx in pending.added_partitions {
            if let Some(info) = inner.gpt.partitions().get(&idx).cloned() {
                if let Err(err) = inner.bind_partition(self, idx, info, vec![]) {
                    log::error!(err:?; "Failed to bind partition");
                }
            }
        }
        Ok(())
    }

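    /// Adds a partition to the pending transaction identified by `request.transaction`.  The
    /// request must specify a name, type GUID, and size; the start block is allocated by the
    /// GPT library, and the instance GUID is generated if not provided.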
    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

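    /// Serves the `fuchsia.storage.partitions.Partition` protocol for the partition at
    /// `gpt_index`.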
    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

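    /// Stages a metadata update (type GUID and/or flags) for the partition at `gpt_index` in the
    /// pending transaction.  The change takes effect when the transaction is committed.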
    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags.as_ref() {
            entry.flags = *flags;
        }
        Ok(())
    }

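    /// Serves the `fuchsia.storage.partitions.OverlayPartition` protocol for an overlay
    /// partition which covers the GPT entries in `gpt_indexes`.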
    pub async fn handle_overlay_partitions_requests(
        &self,
        gpt_indexes: Vec<usize>,
        mut requests: fpartitions::OverlayPartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::OverlayPartitionRequest::GetPartitions { responder } => {
                    match self.get_overlay_partition_info(&gpt_indexes[..]).await {
                        Ok(partitions) => responder.send(Ok(&partitions[..])),
                        Err(status) => responder.send(Err(status.into_raw())),
                    }
                    .unwrap_or_else(
                        |err| log::error!(err:?; "Failed to send GetPartitions response"),
                    );
                }
            }
        }
        Ok(())
    }

    async fn get_overlay_partition_info(
        &self,
        gpt_indexes: &[usize],
    ) -> Result<Vec<fpartitions::PartitionInfo>, zx::Status> {
        fn convert_partition_info(info: &gpt::PartitionInfo) -> fpartitions::PartitionInfo {
            fpartitions::PartitionInfo {
                name: info.label.to_string(),
                type_guid: fidl_fuchsia_hardware_block_partition::Guid {
                    value: info.type_guid.to_bytes(),
                },
                instance_guid: fidl_fuchsia_hardware_block_partition::Guid {
                    value: info.instance_guid.to_bytes(),
                },
                start_block: info.start_block,
                num_blocks: info.num_blocks,
                flags: info.flags,
            }
        }

        let inner = self.inner.lock().await;
        let mut partitions = vec![];
        for index in gpt_indexes {
            let index: u32 = *index as u32;
            partitions.push(
                inner
                    .gpt
                    .partitions()
                    .get(&index)
                    .map(convert_partition_info)
                    .ok_or(zx::Status::BAD_STATE)?,
            );
        }
        Ok(partitions)
    }

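    /// Overwrites the partition table with `partitions` and rebinds all partitions, replacing
    /// every previously published partition entry.  Fails if a transaction is pending.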
    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt.  Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        if let Err(err) = inner.bind_all_partitions(self) {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        inner.overlay_partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}

impl Drop for GptManager {
    fn drop(&mut self) {
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}

#[cfg(test)]
mod tests {
    use super::GptManager;
    use block_client::{
        BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, WriteFlags,
    };
    use block_server::{BlockInfo, DeviceInfo, WriteOptions};
    use fidl::HandleBased as _;
    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
    use gpt::{Gpt, Guid, PartitionInfo};
    use std::num::NonZero;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use vmo_backed_block_server::{
        InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt as _,
    };
    use {
        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
        fuchsia_async as fasync,
    };

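    /// Creates a VMO-backed block device with `block_count` blocks of `block_size` bytes,
    /// formatted with a GPT containing `partitions`, plus an empty directory to publish
    /// partitions into.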
    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(block_count),
                block_size,
                ..Default::default()
            },
            partitions,
        )
        .await
    }

    async fn setup_with_options(
        opts: VmoBackedServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(opts.build().unwrap());
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }

723
724    #[fuchsia::test]
725    async fn load_unformatted_gpt() {
726        let vmo = zx::Vmo::create(4096).unwrap();
727        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));
728
729        GptManager::new(server.connect(), vfs::directory::immutable::simple())
730            .await
731            .expect_err("load should fail");
732    }
733
734    #[fuchsia::test]
735    async fn load_formatted_empty_gpt() {
736        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
737
738        let runner = GptManager::new(block_device.connect(), partitions_dir)
739            .await
740            .expect("load should succeed");
741        runner.shutdown().await;
742    }
743
744    #[fuchsia::test]
745    async fn load_formatted_gpt_with_one_partition() {
746        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
747        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
748        const PART_NAME: &str = "part";
749
750        let (block_device, partitions_dir) = setup(
751            512,
752            8,
753            vec![PartitionInfo {
754                label: PART_NAME.to_string(),
755                type_guid: Guid::from_bytes(PART_TYPE_GUID),
756                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
757                start_block: 4,
758                num_blocks: 1,
759                flags: 0,
760            }],
761        )
762        .await;
763
764        let partitions_dir_clone = partitions_dir.clone();
765        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
766            .await
767            .expect("load should succeed");
768        partitions_dir.get_entry("part-000").expect("No entry found");
769        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
770        runner.shutdown().await;
771    }
772
773    #[fuchsia::test]
774    async fn load_formatted_gpt_with_two_partitions() {
775        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
776        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
777        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
778        const PART_1_NAME: &str = "part1";
779        const PART_2_NAME: &str = "part2";
780
781        let (block_device, partitions_dir) = setup(
782            512,
783            8,
784            vec![
785                PartitionInfo {
786                    label: PART_1_NAME.to_string(),
787                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
788                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
789                    start_block: 4,
790                    num_blocks: 1,
791                    flags: 0,
792                },
793                PartitionInfo {
794                    label: PART_2_NAME.to_string(),
795                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
796                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
797                    start_block: 5,
798                    num_blocks: 1,
799                    flags: 0,
800                },
801            ],
802        )
803        .await;
804
805        let partitions_dir_clone = partitions_dir.clone();
806        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
807            .await
808            .expect("load should succeed");
809        partitions_dir.get_entry("part-000").expect("No entry found");
810        partitions_dir.get_entry("part-001").expect("No entry found");
811        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
812        runner.shutdown().await;
813    }
814
815    #[fuchsia::test]
816    async fn partition_io() {
817        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
818        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
819        const PART_NAME: &str = "part";
820
821        let (block_device, partitions_dir) = setup(
822            512,
823            8,
824            vec![PartitionInfo {
825                label: PART_NAME.to_string(),
826                type_guid: Guid::from_bytes(PART_TYPE_GUID),
827                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
828                start_block: 4,
829                num_blocks: 2,
830                flags: 0,
831            }],
832        )
833        .await;
834
835        let partitions_dir_clone = partitions_dir.clone();
836        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
837            .await
838            .expect("load should succeed");
839
840        let proxy = vfs::serve_directory(
841            partitions_dir.clone(),
842            vfs::path::Path::validate_and_split("part-000").unwrap(),
843            fio::PERM_READABLE,
844        );
845        let block =
846            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
847                .expect("Failed to open block service");
848        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");
849
850        assert_eq!(client.block_count(), 2);
851        assert_eq!(client.block_size(), 512);
852
853        let buf = vec![0xabu8; 512];
854        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
855        client
856            .write_at(BufferSlice::Memory(&buf[..]), 1024)
857            .await
858            .expect_err("write_at should fail when writing past partition end");
859        let mut buf2 = vec![0u8; 512];
860        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
861        assert_eq!(buf, buf2);
862        client
863            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
864            .await
865            .expect_err("read_at should fail when reading past partition end");
866        client.trim(512..1024).await.expect("trim failed");
867        client.trim(1..512).await.expect_err("trim with invalid range should fail");
868        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
869        runner.shutdown().await;
870
871        // Ensure writes persisted to the partition.
872        let mut buf = vec![0u8; 512];
873        let client =
874            RemoteBlockClient::new(block_device.connect::<fblock::BlockProxy>()).await.unwrap();
875        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
876        assert_eq!(&buf[..], &[0xabu8; 512]);
877    }
878
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        client
            .write_at_with_opts(
                BufferSlice::Memory(&buffer),
                0,
                WriteOptions { flags: WriteFlags::FORCE_ACCESS, ..Default::default() },
            )
            .await
            .unwrap();

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn barrier_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn barrier(&self, _writes: Option<&mut vmo_backed_block_server::Writes>) {
                self.0.store(true, Ordering::Relaxed);
            }
        }

        let expect_barrier = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_barrier.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        client.barrier();
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        assert!(expect_barrier.load(Ordering::Relaxed));

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        // Ensure the changes have propagated to the correct partitions.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction_with_io_error() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        #[derive(Clone)]
        struct Observer(Arc<AtomicBool>);
        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                _opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                if self.0.load(Ordering::Relaxed) {
                    vmo_backed_block_server::WriteAction::Fail
                } else {
                    vmo_backed_block_server::WriteAction::Write
                }
            }
        }
        let observer = Observer(Arc::new(AtomicBool::new(false)));
        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                observer: Some(Box::new(observer.clone())),
                ..Default::default()
            },
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");

        observer.0.store(true, Ordering::Relaxed); // Fail the next write
        runner.commit_transaction(transaction).await.expect_err("Commit transaction should fail");

        // Ensure the changes did not get applied.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, PART_TYPE_GUID);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(0));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables() {
        // The test will reset the tables from ["part", "part2"] to
        // ["part3", <empty>, "part4", <125 empty entries>].
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
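        // Only the non-empty slots should be exported: entries appear for indices 0 and 2, and
        // the nil entry at index 1 produces no part-001.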
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }

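    // A full 128-entry table (128 bytes per entry, stored in two copies) needs far more space
    // than the 8 blocks of 512 bytes this device provides, so the reset must be rejected.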
    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_many_partitions() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let new_partitions = vec![nil_entry; 128];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

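    // Partition "b" would cover blocks 6..206 on a 64-block device, running off the end of the
    // device, so the reset must be rejected.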
    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_large_partitions() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 6,
                num_blocks: 200,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

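    // Block 1 holds the primary GPT header (block 0 is the protective MBR), so a partition
    // starting there would clobber the GPT metadata and must be rejected.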
    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![PartitionInfo {
            label: "a".to_string(),
            type_guid: Guid::from_bytes([1u8; 16]),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 1,
            num_blocks: 2,
            flags: 0,
        }];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

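    // Partition "a" occupies blocks 32 and 33 while partition "b" starts at block 33, so the
    // two overlap and the reset must be rejected.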
    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partitions_overlap() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 32,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 33,
                num_blocks: 1,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

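    // Adds a two-block partition via the transaction API and verifies that, once the transaction
    // commits, the new partition is served with the expected geometry.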
    #[fuchsia::test]
    async fn add_partition() {
        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let transaction = runner.create_transaction().await.expect("Create transaction failed");
        let request = fpartitions::PartitionsManagerAddPartitionRequest {
            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
            name: Some("a".to_string()),
            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
            num_blocks: Some(2),
            ..Default::default()
        };
        runner.add_partition(request).await.expect("add_partition failed");
        runner.commit_transaction(transaction).await.expect("commit_transaction failed");

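        // The committed partition should be exported as part-000 and serve two 512-byte blocks.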
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        runner.shutdown().await;
    }

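    // Verifies that get_info and get_metadata reflect both the GPT entry and the backing device:
    // device flags pass through, and max_transfer_size is max_transfer_blocks (2) times the
    // 512-byte block size, i.e. 1024 bytes.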
    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    max_transfer_blocks: NonZero::new(2),
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(info.flags, fblock::Flag::READONLY | fblock::Flag::REMOVABLE);
        assert_eq!(info.max_transfer_size, 1024);

        let metadata =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }

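    // Formats a second GPT inside a partition of the outer GPT and binds another GptManager to
    // it. I/O through the inner partition must compose both offsets: inner block 0 lands at
    // absolute block 9 (outer start 4 plus inner start 5).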
    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromVmo(vmo_clone),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.connect(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

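        // The inner partition is a single 512-byte block, so a write at byte offset 512 falls
        // past its end and must be rejected.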
        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // Check that the write targeted the correct block (4 + 5 = 9)
        let data = vmo.read_to_vec::<u8>(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }

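    // A passthrough session's offset map must stay within the partition's bounds (two blocks
    // here); sessions whose mappings would translate I/O outside the partition get closed.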
    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        // Attempting to open a session with an offset map that extends past the end of the device
        // should fail.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

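        // A mapping of length 3 exceeds the two-block partition even with no target offset, so
        // it should likewise be rejected.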
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 0,
                    length: 3,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        runner.shutdown().await;
    }
}