1use crate::config::Config;
6use crate::partition::PartitionBackend;
7use crate::partitions_directory::PartitionsDirectory;
8use anyhow::{Context as _, Error, anyhow};
9use block_client::{
10 BlockClient as _, BlockDeviceFlag, BufferSlice, MutableBufferSlice, ReadOptions,
11 RemoteBlockClient, VmoId, WriteOptions,
12};
13use block_server::BlockServer;
14use block_server::async_interface::SessionManager;
15
16use fidl::endpoints::ServerEnd;
17use fidl_fuchsia_storage_block as fblock;
18use fidl_fuchsia_storage_partitions as fpartitions;
19use fs_management::format::constants::{
20 ALL_BENCHMARK_PARTITION_LABELS, ALL_SYSTEM_PARTITION_LABELS,
21};
22use fuchsia_async as fasync;
23use fuchsia_sync::Mutex;
24use futures::stream::TryStreamExt as _;
25use std::collections::BTreeMap;
26use std::num::NonZero;
27use std::sync::atomic::{AtomicBool, Ordering};
28use std::sync::{Arc, Weak};
29
/// Returns the VFS entry name for partition `index`: "part-NNN", zero-padded to three digits.
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{index:03}")
}
33
34fn should_passthrough_partition(info: &gpt::PartitionInfo) -> bool {
41 ALL_SYSTEM_PARTITION_LABELS.contains(&info.label.as_str())
43 || ALL_BENCHMARK_PARTITION_LABELS.contains(&info.label.as_str())
46}
47
/// A single GPT partition served as its own block device, backed by a slice of the parent
/// device.
pub struct GptPartition {
    // Weak back-reference to the owning manager, avoiding an Arc cycle.
    gpt: Weak<GptManager>,
    // Current partition metadata; replaced via `update_info` when a transaction commits.
    info: Mutex<gpt::PartitionInfo>,
    block_client: Arc<RemoteBlockClient>,
}
54
/// Unwraps an optional trace flow id to its raw value, using 0 when absent.
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    match trace_flow_id {
        Some(id) => id.get(),
        None => 0,
    }
}
58
59impl GptPartition {
60 pub fn new(
61 gpt: &Arc<GptManager>,
62 block_client: Arc<RemoteBlockClient>,
63 info: gpt::PartitionInfo,
64 ) -> Arc<Self> {
65 Arc::new(Self { gpt: Arc::downgrade(gpt), info: Mutex::new(info), block_client })
66 }
67
68 pub async fn terminate(&self) {
69 if let Err(error) = self.block_client.close().await {
70 log::warn!(error:?; "Failed to close block client");
71 }
72 }
73
74 pub fn update_info(&self, info: gpt::PartitionInfo) -> gpt::PartitionInfo {
76 std::mem::replace(&mut *self.info.lock(), info)
77 }
78
79 pub fn block_size(&self) -> u32 {
80 self.block_client.block_size()
81 }
82
83 pub fn block_count(&self) -> u64 {
84 self.info.lock().num_blocks
85 }
86
87 pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
88 self.block_client.attach_vmo(vmo).await
89 }
90
91 pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
92 self.block_client.detach_vmo(vmoid).await
93 }
94
95 pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
96 if let Some(gpt) = self.gpt.upgrade() {
97 let mapping = {
98 let info = self.info.lock();
99 fblock::BlockOffsetMapping {
100 source_block_offset: 0,
101 target_block_offset: info.start_block,
102 length: info.num_blocks,
103 }
104 };
105 if let Err(err) = gpt.block_proxy.open_session_with_offset_map(session, &mapping) {
106 log::warn!(err:?; "Failed to open passthrough session");
109 }
110 } else {
111 if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
112 log::warn!(err:?; "Failed to send session epitaph");
113 }
114 }
115 }
116
117 pub fn get_info(&self) -> block_server::DeviceInfo {
118 convert_partition_info(
119 &*self.info.lock(),
120 self.block_client.block_flags(),
121 self.block_client.max_transfer_blocks(),
122 )
123 }
124
125 pub async fn read(
126 &self,
127 device_block_offset: u64,
128 block_count: u32,
129 vmo_id: &VmoId,
130 vmo_offset: u64, opts: ReadOptions,
132 trace_flow_id: Option<NonZero<u64>>,
133 ) -> Result<(), zx::Status> {
134 let dev_offset = self
135 .absolute_offset(device_block_offset, block_count)
136 .map(|offset| offset * self.block_size() as u64)?;
137 let buffer = MutableBufferSlice::new_with_vmo_id(
138 vmo_id,
139 vmo_offset,
140 (block_count * self.block_size()) as u64,
141 );
142 self.block_client
143 .read_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
144 .await
145 }
146
147 pub async fn write(
148 &self,
149 device_block_offset: u64,
150 block_count: u32,
151 vmo_id: &VmoId,
152 vmo_offset: u64, opts: WriteOptions,
154 trace_flow_id: Option<NonZero<u64>>,
155 ) -> Result<(), zx::Status> {
156 let dev_offset = self
157 .absolute_offset(device_block_offset, block_count)
158 .map(|offset| offset * self.block_size() as u64)?;
159 let buffer = BufferSlice::new_with_vmo_id(
160 vmo_id,
161 vmo_offset,
162 (block_count * self.block_size()) as u64,
163 );
164 self.block_client
165 .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
166 .await
167 }
168
169 pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
170 self.block_client.flush_traced(trace_id(trace_flow_id)).await
171 }
172
173 pub async fn trim(
174 &self,
175 device_block_offset: u64,
176 block_count: u32,
177 trace_flow_id: Option<NonZero<u64>>,
178 ) -> Result<(), zx::Status> {
179 let dev_offset = self
180 .absolute_offset(device_block_offset, block_count)
181 .map(|offset| offset * self.block_size() as u64)?;
182 let len = block_count as u64 * self.block_size() as u64;
183 self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
184 }
185
186 fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
190 let info = self.info.lock();
191 offset = offset.checked_add(info.start_block).ok_or(zx::Status::OUT_OF_RANGE)?;
192 let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
193 if end > info.start_block + info.num_blocks {
194 Err(zx::Status::OUT_OF_RANGE)
195 } else {
196 Ok(offset)
197 }
198 }
199}
200
201fn convert_partition_info(
202 info: &gpt::PartitionInfo,
203 device_flags: BlockDeviceFlag,
204 max_transfer_blocks: Option<NonZero<u32>>,
205) -> block_server::DeviceInfo {
206 block_server::DeviceInfo::Partition(block_server::PartitionInfo {
207 device_flags,
208 max_transfer_blocks,
209 block_range: Some(info.start_block..info.start_block + info.num_blocks),
210 type_guid: info.type_guid.to_bytes(),
211 instance_guid: info.instance_guid.to_bytes(),
212 name: info.label.clone(),
213 flags: info.flags,
214 })
215}
216
217fn can_merge(a: &gpt::PartitionInfo, b: &gpt::PartitionInfo) -> bool {
218 a.start_block + a.num_blocks == b.start_block
219}
220
/// State of the single in-flight partition-table transaction.
struct PendingTransaction {
    transaction: gpt::Transaction,
    // Koid of the client end of the transaction EventPair; incoming requests are matched
    // against it in `Inner::ensure_transaction_matches`.
    client_koid: zx::Koid,
    // Indexes of partitions added during this transaction; they get bound on commit.
    added_partitions: Vec<u32>,
    // Background task that clears the pending transaction if the client drops its handle.
    _signal_task: fasync::Task<()>,
}
230
/// Mutable state guarded by `GptManager::inner`.
struct Inner {
    gpt: gpt::Gpt,
    // Running block servers for regular partitions, keyed by GPT partition index.
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // Running block servers for merged (overlay) partitions, keyed by GPT partition index.
    overlay_partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    // VFS directory under which partition entries ("part-NNN") are published.
    partitions_dir: PartitionsDirectory,
    // At most one transaction may be pending at a time.
    pending_transaction: Option<PendingTransaction>,
}
241
impl Inner {
    /// Checks `transaction` against the currently pending transaction by comparing koids.
    /// Returns BAD_HANDLE if a different transaction is pending and BAD_STATE if none is.
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    /// Creates a block server for partition `index` described by `info` and publishes it in
    /// the partitions directory.  A non-empty `overlay_indexes` marks the partition as an
    /// overlay spanning those GPT entries, registered via `add_overlay` instead.
    fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
        overlay_indexes: Vec<usize>,
    ) -> Result<(), Error> {
        let passthrough = should_passthrough_partition(&info);
        log::debug!(
            "GPT part {index}{}{}: {info:?}",
            if !overlay_indexes.is_empty() { " (overlay)" } else { "" },
            if passthrough { " (passthrough)" } else { "" },
        );
        // Reject entries whose end block would wrap around the u64 block space.
        info.start_block
            .checked_add(info.num_blocks)
            .ok_or_else(|| anyhow!("Overflow in partition end"))?;
        let partition = PartitionBackend::new(
            GptPartition::new(parent, self.gpt.client().clone(), info),
            passthrough,
        );
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        // Directory entries hold only weak references; the maps below keep the servers alive.
        if !overlay_indexes.is_empty() {
            self.partitions_dir.add_overlay(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                overlay_indexes,
            );
            self.overlay_partitions.insert(index, block_server);
        } else {
            self.partitions_dir.add_partition(
                &partition_directory_entry_name(index),
                Arc::downgrade(&block_server),
                Arc::downgrade(parent),
                index as usize,
            );
            self.partitions.insert(index, block_server);
        }
        Ok(())
    }

    /// Binds the contiguous `super` and `userdata` partitions as a single overlay partition
    /// labelled "super_and_userdata", published under the `super` partition's index and
    /// covering both extents.  Assumes `can_merge` already held for the pair.
    fn bind_super_and_userdata_partition(
        &mut self,
        parent: &Arc<GptManager>,
        super_partition: (u32, gpt::PartitionInfo),
        userdata_partition: (u32, gpt::PartitionInfo),
    ) -> Result<(), Error> {
        let info = gpt::PartitionInfo {
            label: "super_and_userdata".to_string(),
            // The merged partition inherits identity (GUIDs, flags) from `super`.
            type_guid: super_partition.1.type_guid.clone(),
            instance_guid: super_partition.1.instance_guid.clone(),
            start_block: super_partition.1.start_block,
            num_blocks: super_partition.1.num_blocks + userdata_partition.1.num_blocks,
            flags: super_partition.1.flags,
        };
        log::trace!(
            "GPT merged parts {:?} + {:?} -> {info:?}",
            super_partition.1,
            userdata_partition.1
        );
        self.bind_partition(
            parent,
            super_partition.0,
            info,
            vec![super_partition.0 as usize, userdata_partition.0 as usize],
        )
    }

    /// (Re)binds block servers for every partition in the GPT, dropping any previously bound
    /// partitions first.  When `merge_super_and_userdata` is configured and the `super` and
    /// `userdata` partitions are contiguous, they are published as one overlay partition;
    /// otherwise each is bound individually.
    fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.overlay_partitions.clear();
        self.partitions_dir.clear();

        let mut partitions = self.gpt.partitions().clone();
        if parent.config.merge_super_and_userdata {
            // Pull `super` and `userdata` out of the map (if present) so the fallthrough
            // loop below doesn't bind them a second time.
            let super_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "super")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            let userdata_part = match partitions
                .iter()
                .find(|(_, info)| info.label == "userdata")
                .map(|(index, _)| *index)
            {
                Some(index) => partitions.remove_entry(&index),
                None => None,
            };
            if super_part.is_some() && userdata_part.is_some() {
                let super_part = super_part.unwrap();
                let userdata_part = userdata_part.unwrap();
                if can_merge(&super_part.1, &userdata_part.1) {
                    self.bind_super_and_userdata_partition(parent, super_part, userdata_part)?;
                } else {
                    log::warn!("super/userdata cannot be merged");
                    self.bind_partition(parent, super_part.0, super_part.1, vec![])?;
                    self.bind_partition(parent, userdata_part.0, userdata_part.1, vec![])?;
                }
            } else if super_part.is_some() || userdata_part.is_some() {
                log::warn!("Only one of super/userdata found; not merging");
                let (index, info) = super_part.or(userdata_part).unwrap();
                self.bind_partition(parent, index, info, vec![])?;
            }
        }
        for (index, info) in partitions {
            self.bind_partition(parent, index, info, vec![])?;
        }
        Ok(())
    }

    /// Adds `info` to the pending transaction (which the caller must have verified exists)
    /// and records the new entry's index so it can be bound when the transaction commits.
    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}
379
/// Serves a GPT-formatted block device: parses the partition table and publishes each
/// partition as its own block device under a partitions directory.
pub struct GptManager {
    config: Config,
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: futures::lock::Mutex<Inner>,
    // Set by `shutdown()`; asserted in Drop to catch callers that skip orderly shutdown.
    shutdown: AtomicBool,
}
389
390impl std::fmt::Debug for GptManager {
391 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
392 f.debug_struct("GptManager")
393 .field("block_size", &self.block_size)
394 .field("block_count", &self.block_count)
395 .finish()
396 }
397}
398
399impl GptManager {
400 pub async fn new(
401 block_proxy: fblock::BlockProxy,
402 partitions_dir: Arc<vfs::directory::immutable::Simple>,
403 ) -> Result<Arc<Self>, Error> {
404 Self::new_with_config(block_proxy, partitions_dir, Config::default()).await
405 }
406
407 pub async fn new_with_config(
408 block_proxy: fblock::BlockProxy,
409 partitions_dir: Arc<vfs::directory::immutable::Simple>,
410 config: Config,
411 ) -> Result<Arc<Self>, Error> {
412 log::info!("Binding to GPT");
413 let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
414 let block_size = client.block_size();
415 let block_count = client.block_count();
416 let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;
417
418 let this = Arc::new(Self {
419 config,
420 block_proxy,
421 block_size,
422 block_count,
423 inner: futures::lock::Mutex::new(Inner {
424 gpt,
425 partitions: BTreeMap::new(),
426 overlay_partitions: BTreeMap::new(),
427 partitions_dir: PartitionsDirectory::new(partitions_dir),
428 pending_transaction: None,
429 }),
430 shutdown: AtomicBool::new(false),
431 });
432 this.inner.lock().await.bind_all_partitions(&this)?;
433 log::info!("Starting all partitions OK!");
434 Ok(this)
435 }
436
437 pub fn block_size(&self) -> u32 {
438 self.block_size
439 }
440
441 pub fn block_count(&self) -> u64 {
442 self.block_count
443 }
444
445 pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
446 let mut inner = self.inner.lock().await;
447 if inner.pending_transaction.is_some() {
448 return Err(zx::Status::ALREADY_EXISTS);
449 }
450 let transaction = inner.gpt.create_transaction().unwrap();
451 let (client_end, server_end) = zx::EventPair::create();
452 let client_koid = client_end.koid()?;
453 let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
454 let this = self.clone();
455 let task = fasync::Task::spawn(async move {
456 let _ = signal_waiter.await;
457 let mut inner = this.inner.lock().await;
458 if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
459 inner.pending_transaction = None;
460 }
461 });
462 inner.pending_transaction = Some(PendingTransaction {
463 transaction,
464 client_koid,
465 added_partitions: vec![],
466 _signal_task: task,
467 });
468 Ok(client_end)
469 }
470
471 pub async fn commit_transaction(
472 self: &Arc<Self>,
473 transaction: zx::EventPair,
474 ) -> Result<(), zx::Status> {
475 let mut inner = self.inner.lock().await;
476 inner.ensure_transaction_matches(&transaction)?;
477 let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
478 let partitions = pending.transaction.partitions.clone();
479 if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
480 log::warn!(err:?; "Failed to commit transaction");
481 return Err(zx::Status::IO);
482 }
483 for (info, idx) in partitions
485 .iter()
486 .zip(0u32..)
487 .filter(|(info, idx)| !info.is_nil() && !pending.added_partitions.contains(idx))
488 {
489 if let Some(part) = inner.partitions.get(&idx) {
496 part.session_manager().interface().update_info(info.clone());
497 }
498 }
499 for idx in pending.added_partitions {
500 if let Some(info) = inner.gpt.partitions().get(&idx).cloned() {
501 if let Err(err) = inner.bind_partition(self, idx, info, vec![]) {
502 log::error!(err:?; "Failed to bind partition");
503 }
504 }
505 }
506 Ok(())
507 }
508
509 pub async fn add_partition(
510 &self,
511 request: fpartitions::PartitionsManagerAddPartitionRequest,
512 ) -> Result<(), zx::Status> {
513 let mut inner = self.inner.lock().await;
514 inner.ensure_transaction_matches(
515 request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
516 )?;
517 let info = gpt::PartitionInfo {
518 label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
519 type_guid: request
520 .type_guid
521 .map(|value| gpt::Guid::from_bytes(value.value))
522 .ok_or(zx::Status::INVALID_ARGS)?,
523 instance_guid: request
524 .instance_guid
525 .map(|value| gpt::Guid::from_bytes(value.value))
526 .unwrap_or_else(|| gpt::Guid::generate()),
527 start_block: 0,
528 num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
529 flags: request.flags.unwrap_or_default(),
530 };
531 let idx = inner.add_partition(info)?;
532 let partition =
533 inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
534 log::info!(
535 "Allocated partition {:?} at {:?}",
536 partition.label,
537 partition.start_block..partition.start_block + partition.num_blocks
538 );
539 Ok(())
540 }
541
542 pub async fn handle_partitions_requests(
543 &self,
544 gpt_index: usize,
545 mut requests: fpartitions::PartitionRequestStream,
546 ) -> Result<(), zx::Status> {
547 while let Some(request) = requests.try_next().await.unwrap() {
548 match request {
549 fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
550 responder
551 .send(
552 self.update_partition_metadata(gpt_index, payload)
553 .await
554 .map_err(|status| status.into_raw()),
555 )
556 .unwrap_or_else(
557 |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
558 );
559 }
560 }
561 }
562 Ok(())
563 }
564
565 async fn update_partition_metadata(
566 &self,
567 gpt_index: usize,
568 request: fpartitions::PartitionUpdateMetadataRequest,
569 ) -> Result<(), zx::Status> {
570 let mut inner = self.inner.lock().await;
571 inner.ensure_transaction_matches(
572 request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
573 )?;
574
575 let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
576 let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
577 if let Some(type_guid) = request.type_guid.as_ref().cloned() {
578 entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
579 }
580 if let Some(flags) = request.flags.as_ref() {
581 entry.flags = *flags;
582 }
583 Ok(())
584 }
585
586 pub async fn handle_overlay_partitions_requests(
587 &self,
588 gpt_indexes: Vec<usize>,
589 mut requests: fpartitions::OverlayPartitionRequestStream,
590 ) -> Result<(), zx::Status> {
591 while let Some(request) = requests.try_next().await.unwrap() {
592 match request {
593 fpartitions::OverlayPartitionRequest::GetPartitions { responder } => {
594 match self.get_overlay_partition_info(&gpt_indexes[..]).await {
595 Ok(partitions) => responder.send(Ok(&partitions[..])),
596 Err(status) => responder.send(Err(status.into_raw())),
597 }
598 .unwrap_or_else(
599 |err| log::error!(err:?; "Failed to send GetPartitions response"),
600 );
601 }
602 }
603 }
604 Ok(())
605 }
606
607 async fn get_overlay_partition_info(
608 &self,
609 gpt_indexes: &[usize],
610 ) -> Result<Vec<fpartitions::PartitionInfo>, zx::Status> {
611 fn convert_partition_info(info: &gpt::PartitionInfo) -> fpartitions::PartitionInfo {
612 fpartitions::PartitionInfo {
613 name: info.label.to_string(),
614 type_guid: fblock::Guid { value: info.type_guid.to_bytes() },
615 instance_guid: fblock::Guid { value: info.instance_guid.to_bytes() },
616 start_block: info.start_block,
617 num_blocks: info.num_blocks,
618 flags: info.flags,
619 }
620 }
621
622 let inner = self.inner.lock().await;
623 let mut partitions = vec![];
624 for index in gpt_indexes {
625 let index: u32 = *index as u32;
626 partitions.push(
627 inner
628 .gpt
629 .partitions()
630 .get(&index)
631 .map(convert_partition_info)
632 .ok_or(zx::Status::BAD_STATE)?,
633 );
634 }
635 Ok(partitions)
636 }
637
638 pub async fn reset_partition_table(
639 self: &Arc<Self>,
640 partitions: Vec<gpt::PartitionInfo>,
641 ) -> Result<(), zx::Status> {
642 let mut inner = self.inner.lock().await;
643 if inner.pending_transaction.is_some() {
644 return Err(zx::Status::BAD_STATE);
645 }
646
647 log::info!("Resetting gpt. Expect data loss!!!");
648 let mut transaction = inner.gpt.create_transaction().unwrap();
649 transaction.partitions = partitions;
650 inner.gpt.commit_transaction(transaction).await?;
651
652 if let Err(err) = inner.bind_all_partitions(&self) {
653 log::error!(err:?; "Failed to rebind partitions");
654 return Err(zx::Status::BAD_STATE);
655 }
656 log::info!("Rebinding partitions OK!");
657 Ok(())
658 }
659
660 pub async fn shutdown(self: Arc<Self>) {
661 log::info!("Shutting down gpt");
662 let mut inner = self.inner.lock().await;
663 inner.partitions_dir.clear();
664 inner.partitions.clear();
665 inner.overlay_partitions.clear();
666 self.shutdown.store(true, Ordering::Relaxed);
667 log::info!("Shutting down gpt OK");
668 }
669}
670
671impl Drop for GptManager {
672 fn drop(&mut self) {
673 assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
674 }
675}
676
677#[cfg(test)]
678mod tests {
679 use super::GptManager;
680 use block_client::{
681 BlockClient as _, BlockDeviceFlag, BufferSlice, MutableBufferSlice, RemoteBlockClient,
682 WriteFlags,
683 };
684 use block_server::{BlockInfo, DeviceInfo, WriteOptions};
685 use fidl::HandleBased as _;
686 use fidl_fuchsia_io as fio;
687 use fidl_fuchsia_storage_block as fblock;
688 use fidl_fuchsia_storage_partitions as fpartitions;
689 use fs_management::format::constants::FVM_PARTITION_LABEL;
690 use fuchsia_async as fasync;
691 use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
692 use gpt::{Gpt, Guid, PartitionInfo};
693 use std::num::NonZero;
694 use std::sync::Arc;
695 use std::sync::atomic::{AtomicBool, Ordering};
696 use vmo_backed_block_server::{
697 InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt as _,
698 };
699
700 async fn setup(
701 block_size: u32,
702 block_count: u64,
703 partitions: Vec<PartitionInfo>,
704 ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
705 setup_with_options(
706 VmoBackedServerOptions {
707 initial_contents: InitialContents::FromCapacity(block_count),
708 block_size,
709 ..Default::default()
710 },
711 partitions,
712 )
713 .await
714 }
715
    /// Builds a VMO-backed fake block device from `opts`, formats a GPT with `partitions`
    /// onto it, and returns the device plus an empty VFS directory for the manager to fill.
    async fn setup_with_options(
        opts: VmoBackedServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(opts.build().unwrap());
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fblock::BlockMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            // Serve the block protocol in the background while formatting runs.
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }
735
    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        // A blank VMO has no GPT, so binding the manager must fail.
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));

        GptManager::new(server.connect(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }
745
    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        // A valid GPT with zero partitions should still load successfully.
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.connect(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }
755
    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        // Exactly one entry ("part-000") should be published.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
784
    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        // Exactly two entries should be published, one per partition.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }
826
    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // 2-block partition starting at device block 4.
        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // The client sees only the partition's extent, not the whole device.
        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        // I/O inside the partition succeeds; I/O past its end (byte offset 1024 = block 2)
        // must be rejected.
        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Verify the write landed at the correct absolute offset on the underlying device
        // (partition block 0 == device byte 4 * 512 = 2048).
        let mut buf = vec![0u8; 512];
        let client =
            RemoteBlockClient::new(block_device.connect::<fblock::BlockProxy>()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }
889
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Corrupt device block 1 (byte offset 512), where the primary GPT header lives;
            // loading should fall back to the backup copy.
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fblock::BlockMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
937
    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            // Corrupt device block 2 (byte offset 1024), part of the primary partition
            // table; loading should fall back to the backup copy.
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fblock::BlockMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }
985
    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        // Observer asserting that FORCE_ACCESS arrives at the underlying device exactly when
        // the shared flag says it should.
        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                // The device must advertise FUA support for the flag to be honored.
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::DeviceFlag::FUA_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // Plain write: no FORCE_ACCESS expected.
        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        // FORCE_ACCESS write: flag must reach the backing device.
        client
            .write_at_with_opts(
                BufferSlice::Memory(&buffer),
                0,
                WriteOptions { flags: WriteFlags::FORCE_ACCESS, ..Default::default() },
            )
            .await
            .unwrap();

        manager.shutdown().await;
    }
1060
    #[fuchsia::test]
    async fn barrier_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        // Observes writes at the backing VMO server and asserts PRE_BARRIER is
        // present exactly when the shared AtomicBool says it should be.
        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.flags.contains(WriteFlags::PRE_BARRIER),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_barrier = Arc::new(AtomicBool::new(false));
        // The backing device advertises BARRIER_SUPPORT so barriers may be passed
        // through rather than emulated.
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_barrier.clone()))),
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::DeviceFlag::BARRIER_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        // Connect a block client to the single exported partition.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // First write: no barrier expected.
        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_barrier.store(true, Ordering::Relaxed);
        // NOTE(review): barrier() appears to mark the *next* write with PRE_BARRIER
        // (it is not awaited) — the observer then checks the flag on that write.
        client.barrier();
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        manager.shutdown().await;
    }
1128
    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        // Two one-block partitions so that metadata updates to both can be batched
        // into a single transaction.
        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        // Open the Partition service of each exported partition.
        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        // Stage two updates under the same transaction handle: a new type GUID for
        // partition 0 and new flags for partition 1.  Nothing is visible until the
        // transaction is committed.
        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fblock::Guid { value: [0xffu8; 16] }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        // After commit, both updates must be observable through the Block services.
        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        // Partition 1's type GUID is untouched; only its flags changed.
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }
1223
1224 #[fuchsia::test]
1225 async fn commit_transaction_with_io_error() {
1226 const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
1227 const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
1228 const PART_1_NAME: &str = "part";
1229 const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
1230 const PART_2_NAME: &str = "part2";
1231
1232 #[derive(Clone)]
1233 struct Observer(Arc<AtomicBool>);
1234 impl vmo_backed_block_server::Observer for Observer {
1235 fn write(
1236 &self,
1237 _device_block_offset: u64,
1238 _block_count: u32,
1239 _vmo: &Arc<zx::Vmo>,
1240 _vmo_offset: u64,
1241 _opts: WriteOptions,
1242 ) -> vmo_backed_block_server::WriteAction {
1243 if self.0.load(Ordering::Relaxed) {
1244 vmo_backed_block_server::WriteAction::Fail
1245 } else {
1246 vmo_backed_block_server::WriteAction::Write
1247 }
1248 }
1249 }
1250 let observer = Observer(Arc::new(AtomicBool::new(false)));
1251 let (block_device, partitions_dir) = setup_with_options(
1252 VmoBackedServerOptions {
1253 initial_contents: InitialContents::FromCapacity(16),
1254 block_size: 512,
1255 observer: Some(Box::new(observer.clone())),
1256 ..Default::default()
1257 },
1258 vec![
1259 PartitionInfo {
1260 label: PART_1_NAME.to_string(),
1261 type_guid: Guid::from_bytes(PART_TYPE_GUID),
1262 instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
1263 start_block: 4,
1264 num_blocks: 1,
1265 flags: 0,
1266 },
1267 PartitionInfo {
1268 label: PART_2_NAME.to_string(),
1269 type_guid: Guid::from_bytes(PART_TYPE_GUID),
1270 instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
1271 start_block: 5,
1272 num_blocks: 1,
1273 flags: 0,
1274 },
1275 ],
1276 )
1277 .await;
1278 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1279 .await
1280 .expect("load should succeed");
1281
1282 let part_0_dir = vfs::serve_directory(
1283 partitions_dir.clone(),
1284 vfs::Path::validate_and_split("part-000").unwrap(),
1285 fio::PERM_READABLE,
1286 );
1287 let part_1_dir = vfs::serve_directory(
1288 partitions_dir.clone(),
1289 vfs::Path::validate_and_split("part-001").unwrap(),
1290 fio::PERM_READABLE,
1291 );
1292 let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1293 &part_0_dir,
1294 "partition",
1295 )
1296 .expect("Failed to open Partition service");
1297 let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
1298 &part_1_dir,
1299 "partition",
1300 )
1301 .expect("Failed to open Partition service");
1302
1303 let transaction = runner.create_transaction().await.expect("Failed to create transaction");
1304 part_0_proxy
1305 .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1306 transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1307 type_guid: Some(fblock::Guid { value: [0xffu8; 16] }),
1308 ..Default::default()
1309 })
1310 .await
1311 .expect("FIDL error")
1312 .expect("Failed to update_metadata");
1313 part_1_proxy
1314 .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
1315 transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1316 flags: Some(1234),
1317 ..Default::default()
1318 })
1319 .await
1320 .expect("FIDL error")
1321 .expect("Failed to update_metadata");
1322
1323 observer.0.store(true, Ordering::Relaxed); runner.commit_transaction(transaction).await.expect_err("Commit transaction should fail");
1325
1326 let part_0_block =
1328 connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_0_dir, "volume")
1329 .expect("Failed to open Volume service");
1330 let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
1331 assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
1332 assert_eq!(guid.unwrap().value, [2u8; 16]);
1333 let part_1_block =
1334 connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_1_dir, "volume")
1335 .expect("Failed to open Volume service");
1336 let metadata =
1337 part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
1338 assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
1339 assert_eq!(metadata.flags, Some(0));
1340
1341 runner.shutdown().await;
1342 }
1343
    #[fuchsia::test]
    async fn reset_partition_tables() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        // Start with two partitions; the reset below replaces them wholesale.
        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        // A full 128-entry table of empty ("nil") entries, with real partitions
        // placed sparsely at indices 0 and 2 — index 1 is deliberately left empty.
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        // Directory entries mirror the sparse table: 000 and 002 exist, 001 does not.
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        // Spot-check that entry 0 now reports the new partition's name.
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }
1425
1426 #[fuchsia::test]
1427 async fn reset_partition_tables_fails_if_too_many_partitions() {
1428 let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
1429 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1430 .await
1431 .expect("load should succeed");
1432 let nil_entry = PartitionInfo {
1433 label: "".to_string(),
1434 type_guid: Guid::from_bytes([0u8; 16]),
1435 instance_guid: Guid::from_bytes([0u8; 16]),
1436 start_block: 0,
1437 num_blocks: 0,
1438 flags: 0,
1439 };
1440 let new_partitions = vec![nil_entry; 128];
1441 runner
1442 .reset_partition_table(new_partitions)
1443 .await
1444 .expect_err("reset_partition_table should fail");
1445
1446 runner.shutdown().await;
1447 }
1448
1449 #[fuchsia::test]
1450 async fn reset_partition_tables_fails_if_too_large_partitions() {
1451 let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1452 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1453 .await
1454 .expect("load should succeed");
1455 let new_partitions = vec![
1456 PartitionInfo {
1457 label: "a".to_string(),
1458 type_guid: Guid::from_bytes([1u8; 16]),
1459 instance_guid: Guid::from_bytes([1u8; 16]),
1460 start_block: 4,
1461 num_blocks: 2,
1462 flags: 0,
1463 },
1464 PartitionInfo {
1465 label: "b".to_string(),
1466 type_guid: Guid::from_bytes([2u8; 16]),
1467 instance_guid: Guid::from_bytes([2u8; 16]),
1468 start_block: 6,
1469 num_blocks: 200,
1470 flags: 0,
1471 },
1472 ];
1473 runner
1474 .reset_partition_table(new_partitions)
1475 .await
1476 .expect_err("reset_partition_table should fail");
1477
1478 runner.shutdown().await;
1479 }
1480
1481 #[fuchsia::test]
1482 async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
1483 let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1484 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1485 .await
1486 .expect("load should succeed");
1487 let new_partitions = vec![PartitionInfo {
1488 label: "a".to_string(),
1489 type_guid: Guid::from_bytes([1u8; 16]),
1490 instance_guid: Guid::from_bytes([1u8; 16]),
1491 start_block: 1,
1492 num_blocks: 2,
1493 flags: 0,
1494 }];
1495 runner
1496 .reset_partition_table(new_partitions)
1497 .await
1498 .expect_err("reset_partition_table should fail");
1499
1500 runner.shutdown().await;
1501 }
1502
1503 #[fuchsia::test]
1504 async fn reset_partition_tables_fails_if_partitions_overlap() {
1505 let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
1506 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1507 .await
1508 .expect("load should succeed");
1509 let new_partitions = vec![
1510 PartitionInfo {
1511 label: "a".to_string(),
1512 type_guid: Guid::from_bytes([1u8; 16]),
1513 instance_guid: Guid::from_bytes([1u8; 16]),
1514 start_block: 32,
1515 num_blocks: 2,
1516 flags: 0,
1517 },
1518 PartitionInfo {
1519 label: "b".to_string(),
1520 type_guid: Guid::from_bytes([2u8; 16]),
1521 instance_guid: Guid::from_bytes([2u8; 16]),
1522 start_block: 33,
1523 num_blocks: 1,
1524 flags: 0,
1525 },
1526 ];
1527 runner
1528 .reset_partition_table(new_partitions)
1529 .await
1530 .expect_err("reset_partition_table should fail");
1531
1532 runner.shutdown().await;
1533 }
1534
1535 #[fuchsia::test]
1536 async fn add_partition() {
1537 let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
1538 let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
1539 .await
1540 .expect("load should succeed");
1541
1542 let transaction = runner.create_transaction().await.expect("Create transaction failed");
1543 let request = fpartitions::PartitionsManagerAddPartitionRequest {
1544 transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
1545 name: Some("a".to_string()),
1546 type_guid: Some(fblock::Guid { value: [1u8; 16] }),
1547 num_blocks: Some(2),
1548 ..Default::default()
1549 };
1550 runner.add_partition(request).await.expect("add_partition failed");
1551 runner.commit_transaction(transaction).await.expect("add_partition failed");
1552
1553 let proxy = vfs::serve_directory(
1554 partitions_dir.clone(),
1555 vfs::path::Path::validate_and_split("part-000").unwrap(),
1556 fio::PERM_READABLE,
1557 );
1558 let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
1559 .expect("Failed to open block service");
1560 let client: RemoteBlockClient =
1561 RemoteBlockClient::new(block).await.expect("Failed to create block client");
1562
1563 assert_eq!(client.block_count(), 2);
1564 assert_eq!(client.block_size(), 512);
1565
1566 runner.shutdown().await;
1567 }
1568
    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // Give the backing device distinctive flags and a transfer limit so we can
        // verify they are reflected verbatim by the exported partition.
        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    max_transfer_blocks: NonZero::new(2),
                    device_flags: BlockDeviceFlag::READONLY
                        | BlockDeviceFlag::REMOVABLE
                        | BlockDeviceFlag::ZSTD_DECOMPRESSION_SUPPORT,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        // The partition reports its own geometry but inherits the device's flags.
        let info: fblock::BlockInfo =
            part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(
            info.flags,
            BlockDeviceFlag::READONLY
                | BlockDeviceFlag::REMOVABLE
                | BlockDeviceFlag::ZSTD_DECOMPRESSION_SUPPORT
        );
        // max_transfer_blocks (2) * block_size (512) = 1024 bytes.
        assert_eq!(info.max_transfer_size, 1024);

        // Metadata mirrors the GPT entry used at setup.
        let metadata: fblock::BlockGetMetadataResponse =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }
1635
    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        // Keep a handle to the raw VMO so we can inspect the device bytes at the
        // end; the reference child shares the same pages with the server.
        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromVmo(vmo_clone),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: BlockDeviceFlag::READONLY | BlockDeviceFlag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.connect(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        // Format a second GPT *inside* the outer partition; its inner partition
        // occupies one block starting at inner block 5.
        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        // Run a second GptManager on top of the outer partition's block protocol.
        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        // The inner partition is exactly one block: writing block 0 succeeds,
        // writing past it must be rejected.
        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // The write should land at absolute block 9 of the raw device:
        // outer partition start (4) + inner partition start (5).
        let data = vmo.read_to_vec::<u8>(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }
1728
    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        // NOTE(review): the label is FVM_PARTITION_LABEL — presumably required so
        // the partition takes the passthrough path that permits offset-map
        // sessions; confirm against should_passthrough_partition.
        const PART_NAME: &str = FVM_PARTITION_LABEL;

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::DeviceFlag::READONLY | fblock::DeviceFlag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            // The partition window is blocks [4, 6) — two blocks long.
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        // Mapping source [0, 2) onto target [1, 3) would reach one block past the
        // 2-block partition; the server must refuse the session.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        // A 3-block mapping is longer than the partition itself; also refused.
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 0,
                    length: 3,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        runner.shutdown().await;
    }
1801
    #[fuchsia::test]
    async fn test_vmos_detached_on_session_close() {
        let (block_device, partitions_dir) = setup(
            512,
            100,
            vec![PartitionInfo {
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 34,
                num_blocks: 10,
                flags: 0,
                label: "test".to_string(),
            }],
        )
        .await;

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone()).await.unwrap();
        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&proxy, "volume")
            .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        // While the client's session is open, its transfer VMO is registered with
        // the partition's backend.  (Scoped so the lock guard is dropped promptly.)
        {
            let inner = runner.inner.lock().await;
            let backend = inner.partitions.get(&0).unwrap().session_manager().interface();
            assert_eq!(backend.vmo_count(), 1);
        }

        client.close().await.expect("Failed to close client");

        // Closing the session must detach the VMO from the backend.
        {
            let inner = runner.inner.lock().await;
            let backend = inner.partitions.get(&0).unwrap().session_manager().interface();
            assert_eq!(backend.vmo_count(), 0);
        }

        runner.shutdown().await;
    }
1844}