use crate::partition::PartitionBackend;
use crate::partitions_directory::PartitionsDirectory;
use anyhow::{anyhow, Context as _, Error};
use block_client::{
    BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, VmoId, WriteOptions,
};
use block_server::async_interface::SessionManager;
use block_server::BlockServer;

use fidl::endpoints::ServerEnd;
use futures::lock::Mutex;
use futures::stream::TryStreamExt as _;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::ops::Range;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use zx::AsHandleRef as _;
use {
    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
    fuchsia_async as fasync,
};

fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{:03}", index)
}

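/// A single partition in a GPT device, exposing block I/O over a sub-range of the underlying
/// block device.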
pub struct GptPartition {
    gpt: Weak<GptManager>,
    block_client: Arc<RemoteBlockClient>,
    block_range: Range<u64>,
    index: u32,
}

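// Extracts a raw trace id from an optional trace flow id, defaulting to 0 when absent.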
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    trace_flow_id.map(|v| v.get()).unwrap_or_default()
}

impl GptPartition {
    pub fn new(
        gpt: &Arc<GptManager>,
        block_client: Arc<RemoteBlockClient>,
        index: u32,
        block_range: Range<u64>,
    ) -> Arc<Self> {
        debug_assert!(block_range.end >= block_range.start);
        Arc::new(Self { gpt: Arc::downgrade(gpt), block_client, block_range, index })
    }

    pub async fn terminate(&self) {
        if let Err(error) = self.block_client.close().await {
            log::warn!(error:?; "Failed to close block client");
        }
    }

    pub fn index(&self) -> u32 {
        self.index
    }

    pub fn block_size(&self) -> u32 {
        self.block_client.block_size()
    }

    pub fn block_count(&self) -> u64 {
        self.block_range.end - self.block_range.start
    }

    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        self.block_client.attach_vmo(vmo).await
    }

    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
        self.block_client.detach_vmo(vmoid).await
    }

    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
        if let Some(gpt) = self.gpt.upgrade() {
            let mapping = fblock::BlockOffsetMapping {
                source_block_offset: 0,
                target_block_offset: self.block_range.start,
                length: self.block_count(),
            };
            if let Err(err) = gpt.block_proxy.open_session_with_offset_map(session, &mapping) {
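                // The session server end was already consumed by the call, so no epitaph can be
                // sent; the client will simply observe PEER_CLOSED.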
                log::warn!(err:?; "Failed to open passthrough session");
            }
        } else {
            if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
                log::warn!(err:?; "Failed to send session epitaph");
            }
        }
    }

    pub async fn get_info(&self) -> Result<block_server::DeviceInfo, zx::Status> {
        if let Some(gpt) = self.gpt.upgrade() {
            gpt.inner
                .lock()
                .await
                .gpt
                .partitions()
                .get(&self.index)
                .map(|info| {
                    convert_partition_info(
                        info,
                        self.block_client.block_flags(),
                        self.block_client.max_transfer_blocks(),
                    )
                })
                .ok_or(zx::Status::BAD_STATE)
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    pub async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = MutableBufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            // Widen before multiplying so large transfers can't overflow u32 arithmetic.
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client.read_at_traced(buffer, dev_offset, trace_id(trace_flow_id)).await
    }

    pub fn barrier(&self) {
        self.block_client.barrier();
    }

    pub async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64,
        opts: WriteOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = BufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            // Widen before multiplying so large transfers can't overflow u32 arithmetic.
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        self.block_client.flush_traced(trace_id(trace_flow_id)).await
    }

    pub async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let len = block_count as u64 * self.block_size() as u64;
        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
    }

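    // Converts a partition-relative range starting at `offset` and spanning `len` blocks into an
    // absolute block offset on the underlying device, verifying that the range lies entirely
    // within the partition.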
    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
        offset = offset.checked_add(self.block_range.start).ok_or(zx::Status::OUT_OF_RANGE)?;
        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
        if end > self.block_range.end {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(offset)
        }
    }
}

fn convert_partition_info(
    info: &gpt::PartitionInfo,
    device_flags: fblock::Flag,
    max_transfer_blocks: Option<NonZero<u32>>,
) -> block_server::DeviceInfo {
    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
        device_flags,
        max_transfer_blocks,
        block_range: Some(info.start_block..info.start_block + info.num_blocks),
        type_guid: info.type_guid.to_bytes(),
        instance_guid: info.instance_guid.to_bytes(),
        name: info.label.clone(),
        flags: info.flags,
    })
}

struct PendingTransaction {
    transaction: gpt::Transaction,
    client_koid: zx::Koid,
    added_partitions: Vec<u32>,
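    // Waits for the client end of the transaction handle to be closed and then clears the
    // pending transaction.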
    _signal_task: fasync::Task<()>,
}

struct Inner {
    gpt: gpt::Gpt,
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    partitions_dir: PartitionsDirectory,
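    // At most one transaction may be active at any given time.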
    pending_transaction: Option<PendingTransaction>,
}

impl Inner {
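    // Verifies that `transaction` refers to the currently pending transaction, comparing koids.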
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.get_koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    async fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
    ) -> Result<(), Error> {
        log::info!("GPT part {index}: {info:?}");
        let partition = PartitionBackend::new(GptPartition::new(
            parent,
            self.gpt.client().clone(),
            index,
            info.start_block
                ..info
                    .start_block
                    .checked_add(info.num_blocks)
                    .ok_or_else(|| anyhow!("Overflow in partition range"))?,
        ));
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        self.partitions_dir.add_entry(
            &partition_directory_entry_name(index),
            Arc::downgrade(&block_server),
            Arc::downgrade(parent),
            index as usize,
        );
        self.partitions.insert(index, block_server);
        Ok(())
    }

    async fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.partitions_dir.clear();
        for (index, info) in self.gpt.partitions().clone() {
            self.bind_partition(parent, index, info).await?;
        }
        Ok(())
    }

    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}

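/// Manages a GPT-formatted block device, publishing a block server for each partition.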
pub struct GptManager {
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: Mutex<Inner>,
    shutdown: AtomicBool,
}

impl std::fmt::Debug for GptManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}

impl GptManager {
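    /// Loads the GPT from `block_proxy` and publishes an entry for each partition under
    /// `partitions_dir`.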
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            block_proxy,
            block_size,
            block_count,
            inner: Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        log::info!("Bind to GPT OK, binding partitions");
        this.inner.lock().await.bind_all_partitions(&this).await?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.get_koid()?;
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction)?;
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::error!(err:?; "Failed to commit transaction");
            return Err(zx::Status::IO);
        }
        for idx in pending.added_partitions {
            let info = inner.gpt.partitions().get(&idx).ok_or(zx::Status::BAD_STATE)?.clone();
            inner.bind_partition(self, idx, info).await.map_err(|err| {
                log::error!(err:?; "Failed to bind partition");
                zx::Status::BAD_STATE
            })?;
        }
        Ok(())
    }

    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags.as_ref() {
            entry.flags = *flags;
        }
        Ok(())
    }

    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt. Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        log::info!("Rebinding partitions...");
        if let Err(err) = inner.bind_all_partitions(&self).await {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}

impl Drop for GptManager {
    fn drop(&mut self) {
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}

#[cfg(test)]
mod tests {
    use super::GptManager;
    use block_client::{BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient};
    use block_server::{BlockInfo, DeviceInfo, WriteOptions};
    use fidl::HandleBased as _;
    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
    use gpt::{Gpt, Guid, PartitionInfo};
    use std::num::NonZero;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use vmo_backed_block_server::{
        InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt as _,
    };
    use {
        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
        fuchsia_async as fasync,
    };

    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(block_count),
                block_size,
                ..Default::default()
            },
            partitions,
        )
        .await
    }

    async fn setup_with_options(
        opts: VmoBackedServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<VmoBackedServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(opts.build().unwrap());
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }

    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(VmoBackedServer::from_vmo(512, vmo));

        GptManager::new(server.connect(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }

    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.connect(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

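        // Read back through the underlying device: the partition starts at block 4, so data
        // written at partition offset 0 lands at device offset 4 * 512 = 2048.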
        let mut buf = vec![0u8; 512];
        let client =
            RemoteBlockClient::new(block_device.connect::<fblock::BlockProxy>()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> vmo_backed_block_server::WriteAction {
                assert_eq!(
                    opts.contains(WriteOptions::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                vmo_backed_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        client
            .write_at_with_opts(BufferSlice::Memory(&buffer), 0, WriteOptions::FORCE_ACCESS)
            .await
            .unwrap();

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn barrier_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl vmo_backed_block_server::Observer for Observer {
            fn barrier(&self) {
                self.0.store(true, Ordering::Relaxed);
            }
        }

        let expect_barrier = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_barrier.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.connect(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        client.barrier();
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        assert!(expect_barrier.load(Ordering::Relaxed));

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_many_partitions() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let new_partitions = vec![nil_entry; 128];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_large_partitions() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 6,
                num_blocks: 200,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![PartitionInfo {
            label: "a".to_string(),
            type_guid: Guid::from_bytes([1u8; 16]),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 1,
            num_blocks: 2,
            flags: 0,
        }];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partitions_overlap() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 32,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 33,
                num_blocks: 1,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn add_partition() {
        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
        let runner = GptManager::new(block_device.connect(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let transaction = runner.create_transaction().await.expect("Create transaction failed");
        let request = fpartitions::PartitionsManagerAddPartitionRequest {
            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
            name: Some("a".to_string()),
            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
            num_blocks: Some(2),
            ..Default::default()
        };
        runner.add_partition(request).await.expect("add_partition failed");
        runner.commit_transaction(transaction).await.expect("commit_transaction failed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    max_transfer_blocks: NonZero::new(2),
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(info.flags, fblock::Flag::READONLY | fblock::Flag::REMOVABLE);
        assert_eq!(info.max_transfer_size, 1024);

        let metadata =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromVmo(vmo_clone),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.connect(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

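        // The inner partition starts at block 5 of the outer partition, which itself starts at
        // block 4 of the device, so the data should land at absolute block 9.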
        let data = vmo.read_to_vec(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }

    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            VmoBackedServerOptions {
                initial_contents: InitialContents::FromCapacity(16),
                block_size: 512,
                info: DeviceInfo::Block(BlockInfo {
                    device_flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                    ..Default::default()
                }),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.connect(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
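        // A mapping shifted by one block would let the client write past the end of the
        // partition, so the server should refuse it and close the session.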
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

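        // Likewise, a mapping longer than the partition (3 blocks into a 2-block partition)
        // should be rejected.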
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                &fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 0,
                    length: 3,
                },
            )
            .expect("FIDL error");
        session.get_fifo().await.expect_err("Session should be closed");

        runner.shutdown().await;
    }
}