use crate::partition::PartitionBackend;
use crate::partitions_directory::PartitionsDirectory;
use anyhow::{anyhow, Context as _, Error};
use block_client::{
    BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient, VmoId, WriteOptions,
};
use block_server::async_interface::SessionManager;
use block_server::BlockServer;

use fidl::endpoints::ServerEnd;
use futures::lock::Mutex;
use futures::stream::TryStreamExt as _;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::ops::Range;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use zx::AsHandleRef as _;
use {
    fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_storage_partitions as fpartitions,
    fuchsia_async as fasync,
};

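/// Returns the name of the `PartitionsDirectory` entry for the partition at `index`.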
fn partition_directory_entry_name(index: u32) -> String {
    format!("part-{:03}", index)
}

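/// A single partition of a GPT device, which `PartitionBackend` serves as a block device.
/// Offsets are translated from partition-relative blocks to absolute device blocks.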
pub struct GptPartition {
    gpt: Weak<GptManager>,
    block_client: Arc<RemoteBlockClient>,
    block_range: Range<u64>,
    index: u32,
}

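/// Extracts the raw trace flow ID, defaulting to 0 when absent.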
fn trace_id(trace_flow_id: Option<NonZero<u64>>) -> u64 {
    trace_flow_id.map(|v| v.get()).unwrap_or_default()
}

impl GptPartition {
    pub fn new(
        gpt: &Arc<GptManager>,
        block_client: Arc<RemoteBlockClient>,
        index: u32,
        block_range: Range<u64>,
    ) -> Arc<Self> {
        debug_assert!(block_range.end >= block_range.start);
        Arc::new(Self { gpt: Arc::downgrade(gpt), block_client, block_range, index })
    }

    pub async fn terminate(&self) {
        if let Err(error) = self.block_client.close().await {
            log::warn!(error:?; "Failed to close block client");
        }
    }

    pub fn index(&self) -> u32 {
        self.index
    }

    pub fn block_size(&self) -> u32 {
        self.block_client.block_size()
    }

    pub fn block_count(&self) -> u64 {
        self.block_range.end - self.block_range.start
    }

    pub async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        self.block_client.attach_vmo(vmo).await
    }

    pub async fn detach_vmo(&self, vmoid: VmoId) -> Result<(), zx::Status> {
        self.block_client.detach_vmo(vmoid).await
    }

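    /// Opens a session on the underlying device which remaps offsets into this partition's
    /// block range, so the client can only access blocks belonging to the partition.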
    pub fn open_passthrough_session(&self, session: ServerEnd<fblock::SessionMarker>) {
        if let Some(gpt) = self.gpt.upgrade() {
            let mappings = [fblock::BlockOffsetMapping {
                source_block_offset: 0,
                target_block_offset: self.block_range.start,
                length: self.block_count(),
            }];
            if let Err(err) =
                gpt.block_proxy.open_session_with_offset_map(session, None, Some(&mappings[..]))
            {
                log::warn!(err:?; "Failed to open passthrough session");
            }
        } else {
            if let Err(err) = session.close_with_epitaph(zx::Status::BAD_STATE) {
                log::warn!(err:?; "Failed to send session epitaph");
            }
        }
    }

    pub async fn get_info(&self) -> Result<block_server::DeviceInfo, zx::Status> {
        if let Some(gpt) = self.gpt.upgrade() {
            gpt.inner
                .lock()
                .await
                .gpt
                .partitions()
                .get(&self.index)
                .map(|info| {
                    convert_partition_info(
                        info,
                        self.block_client.block_flags(),
                        self.block_client.max_transfer_blocks(),
                    )
                })
                .ok_or(zx::Status::BAD_STATE)
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

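    /// Reads `block_count` blocks, starting at the partition-relative `device_block_offset`,
    /// into the VMO registered under `vmo_id`.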
    pub async fn read(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = MutableBufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            // Widen before multiplying to avoid overflowing u32.
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client.read_at_traced(buffer, dev_offset, trace_id(trace_flow_id)).await
    }

    pub async fn write(
        &self,
        device_block_offset: u64,
        block_count: u32,
        vmo_id: &VmoId,
        vmo_offset: u64,
        opts: WriteOptions,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let buffer = BufferSlice::new_with_vmo_id(
            vmo_id,
            vmo_offset,
            // Widen before multiplying to avoid overflowing u32.
            block_count as u64 * self.block_size() as u64,
        );
        self.block_client
            .write_at_with_opts_traced(buffer, dev_offset, opts, trace_id(trace_flow_id))
            .await
    }

    pub async fn flush(&self, trace_flow_id: Option<NonZero<u64>>) -> Result<(), zx::Status> {
        self.block_client.flush_traced(trace_id(trace_flow_id)).await
    }

    pub async fn trim(
        &self,
        device_block_offset: u64,
        block_count: u32,
        trace_flow_id: Option<NonZero<u64>>,
    ) -> Result<(), zx::Status> {
        let dev_offset = self
            .absolute_offset(device_block_offset, block_count)
            .map(|offset| offset * self.block_size() as u64)?;
        let len = block_count as u64 * self.block_size() as u64;
        self.block_client.trim_traced(dev_offset..dev_offset + len, trace_id(trace_flow_id)).await
    }

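    /// Translates a partition-relative block offset into an absolute offset on the underlying
    /// device, verifying that the `len`-block range lies entirely within the partition.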
    fn absolute_offset(&self, mut offset: u64, len: u32) -> Result<u64, zx::Status> {
        offset = offset.checked_add(self.block_range.start).ok_or(zx::Status::OUT_OF_RANGE)?;
        let end = offset.checked_add(len as u64).ok_or(zx::Status::OUT_OF_RANGE)?;
        if end > self.block_range.end {
            Err(zx::Status::OUT_OF_RANGE)
        } else {
            Ok(offset)
        }
    }
}

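/// Converts the GPT partition metadata into the `DeviceInfo` representation served by the block
/// server.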
fn convert_partition_info(
    info: &gpt::PartitionInfo,
    device_flags: fblock::Flag,
    max_transfer_blocks: Option<NonZero<u32>>,
) -> block_server::DeviceInfo {
    block_server::DeviceInfo::Partition(block_server::PartitionInfo {
        device_flags,
        max_transfer_blocks,
        block_range: Some(info.start_block..info.start_block + info.num_blocks),
        type_guid: info.type_guid.to_bytes(),
        instance_guid: info.instance_guid.to_bytes(),
        name: info.label.clone(),
        flags: info.flags,
    })
}

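/// An in-flight partition table transaction, identified by the koid of the client's end of the
/// transaction event pair.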
struct PendingTransaction {
    transaction: gpt::Transaction,
    client_koid: zx::Koid,
    // Partition indices which were added in this transaction.  They get bound when the
    // transaction is committed.
    added_partitions: Vec<u32>,
    // A task which watches for the client end of the transaction event pair being closed, and
    // clears out the pending transaction when it is.
    _signal_task: fasync::Task<()>,
}

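/// State of the `GptManager` which is guarded by its lock.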
struct Inner {
    gpt: gpt::Gpt,
    partitions: BTreeMap<u32, Arc<BlockServer<SessionManager<PartitionBackend>>>>,
    partitions_dir: PartitionsDirectory,
    pending_transaction: Option<PendingTransaction>,
}

impl Inner {
    fn ensure_transaction_matches(&self, transaction: &zx::EventPair) -> Result<(), zx::Status> {
        if let Some(pending) = self.pending_transaction.as_ref() {
            if transaction.get_koid()? == pending.client_koid {
                Ok(())
            } else {
                Err(zx::Status::BAD_HANDLE)
            }
        } else {
            Err(zx::Status::BAD_STATE)
        }
    }

    async fn bind_partition(
        &mut self,
        parent: &Arc<GptManager>,
        index: u32,
        info: gpt::PartitionInfo,
    ) -> Result<(), Error> {
        log::info!("GPT part {index}: {info:?}");
        let partition = PartitionBackend::new(GptPartition::new(
            parent,
            self.gpt.client().clone(),
            index,
            info.start_block
                ..info
                    .start_block
                    .checked_add(info.num_blocks)
                    .ok_or_else(|| anyhow!("Overflow in partition range"))?,
        ));
        let block_server = Arc::new(BlockServer::new(parent.block_size, partition));
        self.partitions_dir.add_entry(
            &partition_directory_entry_name(index),
            Arc::downgrade(&block_server),
            Arc::downgrade(parent),
            index as usize,
        );
        self.partitions.insert(index, block_server);
        Ok(())
    }

    async fn bind_all_partitions(&mut self, parent: &Arc<GptManager>) -> Result<(), Error> {
        self.partitions.clear();
        self.partitions_dir.clear();
        for (index, info) in self.gpt.partitions().clone() {
            self.bind_partition(parent, index, info).await?;
        }
        Ok(())
    }

    fn add_partition(&mut self, info: gpt::PartitionInfo) -> Result<usize, gpt::AddPartitionError> {
        let pending = self.pending_transaction.as_mut().unwrap();
        let idx = self.gpt.add_partition(&mut pending.transaction, info)?;
        pending.added_partitions.push(idx as u32);
        Ok(idx)
    }
}

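/// Manages a GPT-formatted block device, publishing each partition in the table as a block
/// device in a `PartitionsDirectory`.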
pub struct GptManager {
    block_proxy: fblock::BlockProxy,
    block_size: u32,
    block_count: u64,
    inner: Mutex<Inner>,
    shutdown: AtomicBool,
}

impl std::fmt::Debug for GptManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        f.debug_struct("GptManager")
            .field("block_size", &self.block_size)
            .field("block_count", &self.block_count)
            .finish()
    }
}

impl GptManager {
    pub async fn new(
        block_proxy: fblock::BlockProxy,
        partitions_dir: Arc<vfs::directory::immutable::Simple>,
    ) -> Result<Arc<Self>, Error> {
        log::info!("Binding to GPT");
        let client = Arc::new(RemoteBlockClient::new(block_proxy.clone()).await?);
        let block_size = client.block_size();
        let block_count = client.block_count();
        let gpt = gpt::Gpt::open(client).await.context("Failed to load GPT")?;

        let this = Arc::new(Self {
            block_proxy,
            block_size,
            block_count,
            inner: Mutex::new(Inner {
                gpt,
                partitions: BTreeMap::new(),
                partitions_dir: PartitionsDirectory::new(partitions_dir),
                pending_transaction: None,
            }),
            shutdown: AtomicBool::new(false),
        });
        log::info!("Bind to GPT OK, binding partitions");
        this.inner.lock().await.bind_all_partitions(&this).await?;
        log::info!("Starting all partitions OK!");
        Ok(this)
    }

    pub fn block_size(&self) -> u32 {
        self.block_size
    }

    pub fn block_count(&self) -> u64 {
        self.block_count
    }

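    /// Creates a new transaction for modifying the partition table.  Only one transaction may be
    /// active at a time.  The returned event pair identifies the transaction; if the client end
    /// is closed before the transaction is committed, the transaction is discarded.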
    pub async fn create_transaction(self: &Arc<Self>) -> Result<zx::EventPair, zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::ALREADY_EXISTS);
        }
        let transaction = inner.gpt.create_transaction().unwrap();
        let (client_end, server_end) = zx::EventPair::create();
        let client_koid = client_end.get_koid()?;
        let signal_waiter = fasync::OnSignals::new(server_end, zx::Signals::EVENTPAIR_PEER_CLOSED);
        let this = self.clone();
        let task = fasync::Task::spawn(async move {
            let _ = signal_waiter.await;
            let mut inner = this.inner.lock().await;
            if inner.pending_transaction.as_ref().map_or(false, |t| t.client_koid == client_koid) {
                inner.pending_transaction = None;
            }
        });
        inner.pending_transaction = Some(PendingTransaction {
            transaction,
            client_koid,
            added_partitions: vec![],
            _signal_task: task,
        });
        Ok(client_end)
    }

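    /// Commits the changes staged in `transaction` to the device and binds any partitions which
    /// were added during the transaction.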
    pub async fn commit_transaction(
        self: &Arc<Self>,
        transaction: zx::EventPair,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(&transaction)?;
        let pending = std::mem::take(&mut inner.pending_transaction).unwrap();
        if let Err(err) = inner.gpt.commit_transaction(pending.transaction).await {
            log::error!(err:?; "Failed to commit transaction");
            return Err(zx::Status::IO);
        }
        for idx in pending.added_partitions {
            let info = inner.gpt.partitions().get(&idx).ok_or(zx::Status::BAD_STATE)?.clone();
            inner.bind_partition(self, idx, info).await.map_err(|err| {
                log::error!(err:?; "Failed to bind partition");
                zx::Status::BAD_STATE
            })?;
        }
        Ok(())
    }

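    /// Adds a partition to the pending transaction.  The start block is allocated by the GPT
    /// library, and the partition is bound once the transaction is committed.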
    pub async fn add_partition(
        &self,
        request: fpartitions::PartitionsManagerAddPartitionRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;
        let info = gpt::PartitionInfo {
            label: request.name.ok_or(zx::Status::INVALID_ARGS)?,
            type_guid: request
                .type_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .ok_or(zx::Status::INVALID_ARGS)?,
            instance_guid: request
                .instance_guid
                .map(|value| gpt::Guid::from_bytes(value.value))
                .unwrap_or_else(|| gpt::Guid::generate()),
            start_block: 0,
            num_blocks: request.num_blocks.ok_or(zx::Status::INVALID_ARGS)?,
            flags: request.flags.unwrap_or_default(),
        };
        let idx = inner.add_partition(info)?;
        let partition =
            inner.pending_transaction.as_ref().unwrap().transaction.partitions.get(idx).unwrap();
        log::info!(
            "Allocated partition {:?} at {:?}",
            partition.label,
            partition.start_block..partition.start_block + partition.num_blocks
        );
        Ok(())
    }

    pub async fn handle_partitions_requests(
        &self,
        gpt_index: usize,
        mut requests: fpartitions::PartitionRequestStream,
    ) -> Result<(), zx::Status> {
        while let Some(request) = requests.try_next().await.unwrap() {
            match request {
                fpartitions::PartitionRequest::UpdateMetadata { payload, responder } => {
                    responder
                        .send(
                            self.update_partition_metadata(gpt_index, payload)
                                .await
                                .map_err(|status| status.into_raw()),
                        )
                        .unwrap_or_else(
                            |err| log::error!(err:?; "Failed to send UpdateMetadata response"),
                        );
                }
            }
        }
        Ok(())
    }

    async fn update_partition_metadata(
        &self,
        gpt_index: usize,
        request: fpartitions::PartitionUpdateMetadataRequest,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        inner.ensure_transaction_matches(
            request.transaction.as_ref().ok_or(zx::Status::BAD_HANDLE)?,
        )?;

        let transaction = &mut inner.pending_transaction.as_mut().unwrap().transaction;
        let entry = transaction.partitions.get_mut(gpt_index).ok_or(zx::Status::BAD_STATE)?;
        if let Some(type_guid) = request.type_guid.as_ref().cloned() {
            entry.type_guid = gpt::Guid::from_bytes(type_guid.value);
        }
        if let Some(flags) = request.flags.as_ref() {
            entry.flags = *flags;
        }
        Ok(())
    }

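    /// Replaces the entire partition table with `partitions` and rebinds all partitions.  Fails
    /// if a transaction is pending.  This is destructive to existing partition entries.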
    pub async fn reset_partition_table(
        self: &Arc<Self>,
        partitions: Vec<gpt::PartitionInfo>,
    ) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock().await;
        if inner.pending_transaction.is_some() {
            return Err(zx::Status::BAD_STATE);
        }

        log::info!("Resetting gpt. Expect data loss!!!");
        let mut transaction = inner.gpt.create_transaction().unwrap();
        transaction.partitions = partitions;
        inner.gpt.commit_transaction(transaction).await?;

        log::info!("Rebinding partitions...");
        if let Err(err) = inner.bind_all_partitions(self).await {
            log::error!(err:?; "Failed to rebind partitions");
            return Err(zx::Status::BAD_STATE);
        }
        log::info!("Rebinding partitions OK!");
        Ok(())
    }

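    /// Shuts down the GPT device and all of its partitions.  This must be called before the
    /// `GptManager` is dropped.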
    pub async fn shutdown(self: Arc<Self>) {
        log::info!("Shutting down gpt");
        let mut inner = self.inner.lock().await;
        inner.partitions_dir.clear();
        inner.partitions.clear();
        self.shutdown.store(true, Ordering::Relaxed);
        log::info!("Shutting down gpt OK");
    }
}

impl Drop for GptManager {
    fn drop(&mut self) {
        assert!(self.shutdown.load(Ordering::Relaxed), "Did you forget to shutdown?");
    }
}

#[cfg(test)]
mod tests {
    use super::GptManager;
    use block_client::{BlockClient as _, BufferSlice, MutableBufferSlice, RemoteBlockClient};
    use block_server::WriteOptions;
    use fake_block_server::{FakeServer, FakeServerOptions};
    use fidl::HandleBased as _;
    use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
    use gpt::{Gpt, Guid, PartitionInfo};
    use std::num::NonZero;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use {
        fidl_fuchsia_hardware_block as fblock, fidl_fuchsia_hardware_block_volume as fvolume,
        fidl_fuchsia_io as fio, fidl_fuchsia_storage_partitions as fpartitions,
        fuchsia_async as fasync,
    };

    async fn setup(
        block_size: u32,
        block_count: u64,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        setup_with_options(
            FakeServerOptions { block_count: Some(block_count), block_size, ..Default::default() },
            partitions,
        )
        .await
    }

    async fn setup_with_options(
        opts: FakeServerOptions<'_>,
        partitions: Vec<PartitionInfo>,
    ) -> (Arc<FakeServer>, Arc<vfs::directory::immutable::Simple>) {
        let server = Arc::new(FakeServer::from(opts));
        {
            let (block_client, block_server) =
                fidl::endpoints::create_proxy::<fblock::BlockMarker>();
            let volume_stream = fidl::endpoints::ServerEnd::<fvolume::VolumeMarker>::from(
                block_server.into_channel(),
            )
            .into_stream();
            let server_clone = server.clone();
            let _task = fasync::Task::spawn(async move { server_clone.serve(volume_stream).await });
            let client = Arc::new(RemoteBlockClient::new(block_client).await.unwrap());
            Gpt::format(client, partitions).await.unwrap();
        }
        (server, vfs::directory::immutable::simple())
    }

    #[fuchsia::test]
    async fn load_unformatted_gpt() {
        let vmo = zx::Vmo::create(4096).unwrap();
        let server = Arc::new(FakeServer::from_vmo(512, vmo));

        GptManager::new(server.block_proxy(), vfs::directory::immutable::simple())
            .await
            .expect_err("load should fail");
    }

    #[fuchsia::test]
    async fn load_formatted_empty_gpt() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir)
            .await
            .expect("load should succeed");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_one_partition() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_two_partitions() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        partitions_dir.get_entry("part-002").map(|_| ()).expect_err("Extra entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_io() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        let buf = vec![0xabu8; 512];
        client.write_at(BufferSlice::Memory(&buf[..]), 0).await.expect("write_at failed");
        client
            .write_at(BufferSlice::Memory(&buf[..]), 1024)
            .await
            .expect_err("write_at should fail when writing past partition end");
        let mut buf2 = vec![0u8; 512];
        client.read_at(MutableBufferSlice::Memory(&mut buf2[..]), 0).await.expect("read_at failed");
        assert_eq!(buf, buf2);
        client
            .read_at(MutableBufferSlice::Memory(&mut buf2[..]), 1024)
            .await
            .expect_err("read_at should fail when reading past partition end");
        client.trim(512..1024).await.expect("trim failed");
        client.trim(1..512).await.expect_err("trim with invalid range should fail");
        client.trim(1024..1536).await.expect_err("trim past end of partition should fail");
        runner.shutdown().await;

        // Check that the write landed on the correct underlying block: the partition starts at
        // block 4, so byte offset 0 in the partition is byte offset 2048 on the device.
        let mut buf = vec![0u8; 512];
        let client = RemoteBlockClient::new(block_device.block_proxy()).await.unwrap();
        client.read_at(MutableBufferSlice::Memory(&mut buf[..]), 2048).await.unwrap();
        assert_eq!(&buf[..], &[0xabu8; 512]);
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_header() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 512).await.unwrap();
        }

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn load_formatted_gpt_with_invalid_primary_partition_table() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_1_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_2_GUID: [u8; 16] = [3u8; 16];
        const PART_1_NAME: &str = "part1";
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            8,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_1_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_INSTANCE_2_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        {
            let (client, stream) =
                fidl::endpoints::create_proxy_and_stream::<fvolume::VolumeMarker>();
            let server = block_device.clone();
            let _task = fasync::Task::spawn(async move { server.serve(stream).await });
            let client = RemoteBlockClient::new(client).await.unwrap();
            client.write_at(BufferSlice::Memory(&[0xffu8; 512]), 1024).await.unwrap();
        }

        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").expect("No entry found");
        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn force_access_passed_through() {
        const BLOCK_SIZE: u32 = 512;
        const BLOCK_COUNT: u64 = 1024;

        struct Observer(Arc<AtomicBool>);

        impl fake_block_server::Observer for Observer {
            fn write(
                &self,
                _device_block_offset: u64,
                _block_count: u32,
                _vmo: &Arc<zx::Vmo>,
                _vmo_offset: u64,
                opts: WriteOptions,
            ) -> fake_block_server::WriteAction {
                assert_eq!(
                    opts.contains(WriteOptions::FORCE_ACCESS),
                    self.0.load(Ordering::Relaxed)
                );
                fake_block_server::WriteAction::Write
            }
        }

        let expect_force_access = Arc::new(AtomicBool::new(false));
        let (server, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(BLOCK_COUNT),
                block_size: BLOCK_SIZE,
                observer: Some(Box::new(Observer(expect_force_access.clone()))),
                ..Default::default()
            },
            vec![PartitionInfo {
                label: "foo".to_string(),
                type_guid: Guid::from_bytes([1; 16]),
                instance_guid: Guid::from_bytes([2; 16]),
                start_block: 4,
                num_blocks: 1,
                flags: 0,
            }],
        )
        .await;

        let manager = GptManager::new(server.block_proxy(), partitions_dir.clone()).await.unwrap();

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        let buffer = vec![0; BLOCK_SIZE as usize];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();

        expect_force_access.store(true, Ordering::Relaxed);

        client
            .write_at_with_opts(BufferSlice::Memory(&buffer), 0, WriteOptions::FORCE_ACCESS)
            .await
            .unwrap();

        manager.shutdown().await;
    }

    #[fuchsia::test]
    async fn commit_transaction() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";

        let (block_device, partitions_dir) = setup(
            512,
            16,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let part_0_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_1_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::Path::validate_and_split("part-001").unwrap(),
            fio::PERM_READABLE,
        );
        let part_0_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_0_dir,
            "partition",
        )
        .expect("Failed to open Partition service");
        let part_1_proxy = connect_to_named_protocol_at_dir_root::<fpartitions::PartitionMarker>(
            &part_1_dir,
            "partition",
        )
        .expect("Failed to open Partition service");

        let transaction = runner.create_transaction().await.expect("Failed to create transaction");
        part_0_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid {
                    value: [0xffu8; 16],
                }),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        part_1_proxy
            .update_metadata(fpartitions::PartitionUpdateMetadataRequest {
                transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
                flags: Some(1234),
                ..Default::default()
            })
            .await
            .expect("FIDL error")
            .expect("Failed to update_metadata");
        runner.commit_transaction(transaction).await.expect("Failed to commit transaction");

        let part_0_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_0_dir, "volume")
                .expect("Failed to open Volume service");
        let (status, guid) = part_0_block.get_type_guid().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(guid.unwrap().value, [0xffu8; 16]);
        let part_1_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_1_dir, "volume")
                .expect("Failed to open Volume service");
        let metadata =
            part_1_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.flags, Some(1234));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_1_NAME: &str = "part";
        const PART_2_INSTANCE_GUID: [u8; 16] = [3u8; 16];
        const PART_2_NAME: &str = "part2";
        const PART_3_NAME: &str = "part3";
        const PART_4_NAME: &str = "part4";

        let (block_device, partitions_dir) = setup(
            512,
            1048576 / 512,
            vec![
                PartitionInfo {
                    label: PART_1_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_1_INSTANCE_GUID),
                    start_block: 4,
                    num_blocks: 1,
                    flags: 0,
                },
                PartitionInfo {
                    label: PART_2_NAME.to_string(),
                    type_guid: Guid::from_bytes(PART_TYPE_GUID),
                    instance_guid: Guid::from_bytes(PART_2_INSTANCE_GUID),
                    start_block: 5,
                    num_blocks: 1,
                    flags: 0,
                },
            ],
        )
        .await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let mut new_partitions = vec![nil_entry; 128];
        new_partitions[0] = PartitionInfo {
            label: PART_3_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 64,
            num_blocks: 2,
            flags: 0,
        };
        new_partitions[2] = PartitionInfo {
            label: PART_4_NAME.to_string(),
            type_guid: Guid::from_bytes(PART_TYPE_GUID),
            instance_guid: Guid::from_bytes([2u8; 16]),
            start_block: 66,
            num_blocks: 4,
            flags: 0,
        };
        runner.reset_partition_table(new_partitions).await.expect("reset_partition_table failed");
        partitions_dir.get_entry("part-000").expect("No entry found");
        partitions_dir.get_entry("part-001").map(|_| ()).expect_err("Extra entry found");
        partitions_dir.get_entry("part-002").expect("No entry found");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let (status, name) = block.get_name().await.expect("FIDL error");
        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
        assert_eq!(name.unwrap(), PART_3_NAME);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_many_partitions() {
        let (block_device, partitions_dir) = setup(512, 8, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let nil_entry = PartitionInfo {
            label: "".to_string(),
            type_guid: Guid::from_bytes([0u8; 16]),
            instance_guid: Guid::from_bytes([0u8; 16]),
            start_block: 0,
            num_blocks: 0,
            flags: 0,
        };
        let new_partitions = vec![nil_entry; 128];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_too_large_partitions() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 4,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 6,
                num_blocks: 200,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partition_overlaps_metadata() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![PartitionInfo {
            label: "a".to_string(),
            type_guid: Guid::from_bytes([1u8; 16]),
            instance_guid: Guid::from_bytes([1u8; 16]),
            start_block: 1,
            num_blocks: 2,
            flags: 0,
        }];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn reset_partition_tables_fails_if_partitions_overlap() {
        let (block_device, partitions_dir) = setup(512, 64, vec![]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");
        let new_partitions = vec![
            PartitionInfo {
                label: "a".to_string(),
                type_guid: Guid::from_bytes([1u8; 16]),
                instance_guid: Guid::from_bytes([1u8; 16]),
                start_block: 32,
                num_blocks: 2,
                flags: 0,
            },
            PartitionInfo {
                label: "b".to_string(),
                type_guid: Guid::from_bytes([2u8; 16]),
                instance_guid: Guid::from_bytes([2u8; 16]),
                start_block: 33,
                num_blocks: 1,
                flags: 0,
            },
        ];
        runner
            .reset_partition_table(new_partitions)
            .await
            .expect_err("reset_partition_table should fail");

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn add_partition() {
        let (block_device, partitions_dir) = setup(512, 64, vec![PartitionInfo::nil(); 64]).await;
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir.clone())
            .await
            .expect("load should succeed");

        let transaction = runner.create_transaction().await.expect("Create transaction failed");
        let request = fpartitions::PartitionsManagerAddPartitionRequest {
            transaction: Some(transaction.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()),
            name: Some("a".to_string()),
            type_guid: Some(fidl_fuchsia_hardware_block_partition::Guid { value: [1u8; 16] }),
            num_blocks: Some(2),
            ..Default::default()
        };
        runner.add_partition(request).await.expect("add_partition failed");
        runner.commit_transaction(transaction).await.expect("commit_transaction failed");

        let proxy = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&proxy, "volume")
                .expect("Failed to open block service");
        let client = RemoteBlockClient::new(block).await.expect("Failed to create block client");

        assert_eq!(client.block_count(), 2);
        assert_eq!(client.block_size(), 512);

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn partition_info() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(8),
                block_size: 512,
                max_transfer_blocks: NonZero::new(2),
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(&part_dir, "volume")
                .expect("Failed to open Volume service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        assert_eq!(info.block_count, 1);
        assert_eq!(info.block_size, 512);
        assert_eq!(info.flags, fblock::Flag::READONLY | fblock::Flag::REMOVABLE);
        assert_eq!(info.max_transfer_size, 1024);

        let metadata =
            part_block.get_metadata().await.expect("FIDL error").expect("get_metadata failed");
        assert_eq!(metadata.name, Some(PART_NAME.to_string()));
        assert_eq!(metadata.type_guid.unwrap().value, PART_TYPE_GUID);
        assert_eq!(metadata.instance_guid.unwrap().value, PART_INSTANCE_GUID);
        assert_eq!(metadata.start_block_offset, Some(4));
        assert_eq!(metadata.num_blocks, Some(1));
        assert_eq!(metadata.flags, Some(0xabcd));

        runner.shutdown().await;
    }

    #[fuchsia::test]
    async fn nested_gpt() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let vmo = zx::Vmo::create(64 * 512).unwrap();
        let vmo_clone = vmo.create_child(zx::VmoChildOptions::REFERENCE, 0, 0).unwrap();
        let (outer_block_device, outer_partitions_dir) = setup_with_options(
            FakeServerOptions {
                vmo: Some(vmo_clone),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 16,
                flags: 0xabcd,
            }],
        )
        .await;

        let outer_partitions_dir_clone = outer_partitions_dir.clone();
        let outer_runner =
            GptManager::new(outer_block_device.block_proxy(), outer_partitions_dir_clone)
                .await
                .expect("load should succeed");

        let outer_part_dir = vfs::serve_directory(
            outer_partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&outer_part_dir, "volume")
                .expect("Failed to open Block service");

        let client = Arc::new(RemoteBlockClient::new(part_block.clone()).await.unwrap());
        let _ = gpt::Gpt::format(
            client,
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 5,
                num_blocks: 1,
                flags: 0xabcd,
            }],
        )
        .await
        .unwrap();

        let partitions_dir = vfs::directory::immutable::simple();
        let partitions_dir_clone = partitions_dir.clone();
        let runner =
            GptManager::new(part_block, partitions_dir_clone).await.expect("load should succeed");
        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );
        let inner_part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");

        let client =
            RemoteBlockClient::new(inner_part_block).await.expect("Failed to create block client");
        assert_eq!(client.block_count(), 1);
        assert_eq!(client.block_size(), 512);

        let buffer = vec![0xaa; 512];
        client.write_at(BufferSlice::Memory(&buffer), 0).await.unwrap();
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");
        client.flush().await.unwrap();

        runner.shutdown().await;
        outer_runner.shutdown().await;

        // The inner partition starts at block 5 of the outer partition, which itself starts at
        // block 4 of the device, so the write should land at device block 9.
        let data = vmo.read_to_vec(9 * 512, 512).unwrap();
        assert_eq!(&data[..], &buffer[..]);
    }

    #[fuchsia::test]
    async fn offset_map_does_not_allow_partition_overwrite() {
        const PART_TYPE_GUID: [u8; 16] = [2u8; 16];
        const PART_INSTANCE_GUID: [u8; 16] = [2u8; 16];
        const PART_NAME: &str = "part";

        let (block_device, partitions_dir) = setup_with_options(
            FakeServerOptions {
                block_count: Some(16),
                block_size: 512,
                flags: fblock::Flag::READONLY | fblock::Flag::REMOVABLE,
                ..Default::default()
            },
            vec![PartitionInfo {
                label: PART_NAME.to_string(),
                type_guid: Guid::from_bytes(PART_TYPE_GUID),
                instance_guid: Guid::from_bytes(PART_INSTANCE_GUID),
                start_block: 4,
                num_blocks: 2,
                flags: 0xabcd,
            }],
        )
        .await;

        let partitions_dir_clone = partitions_dir.clone();
        let runner = GptManager::new(block_device.block_proxy(), partitions_dir_clone)
            .await
            .expect("load should succeed");

        let part_dir = vfs::serve_directory(
            partitions_dir.clone(),
            vfs::path::Path::validate_and_split("part-000").unwrap(),
            fio::PERM_READABLE,
        );

        // Open a session with an offset map which shifts all accesses forward by one block.  The
        // last block of the mapped range would then fall outside of the two-block partition, so
        // accesses to it must be rejected.
        let part_block =
            connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(&part_dir, "volume")
                .expect("Failed to open Block service");
        let info = part_block.get_info().await.expect("FIDL error").expect("get_info failed");
        let (session, server_end) = fidl::endpoints::create_proxy::<fblock::SessionMarker>();
        part_block
            .open_session_with_offset_map(
                server_end,
                None,
                Some(&[fblock::BlockOffsetMapping {
                    source_block_offset: 0,
                    target_block_offset: 1,
                    length: 2,
                }]),
            )
            .expect("FIDL error");

        let client = Arc::new(RemoteBlockClient::from_session(info, session).await.unwrap());
        let mut buffer = vec![0xaa; 512];
        client.flush().await.expect("Flush should succeed");
        client
            .read_at(MutableBufferSlice::Memory(&mut buffer), 0)
            .await
            .expect("Read should succeed");
        client.write_at(BufferSlice::Memory(&buffer), 0).await.expect("Write should succeed");
        client
            .read_at(MutableBufferSlice::Memory(&mut buffer), 512)
            .await
            .expect_err("Read past end should fail");
        client
            .write_at(BufferSlice::Memory(&buffer), 512)
            .await
            .expect_err("Write past end should fail");

        runner.shutdown().await;
    }
}