// fake_block_client.rs

use async_trait::async_trait;
use block_client::{BlockClient, BufferSlice, MutableBufferSlice, VmoId, WriteOptions};
use fidl_fuchsia_hardware_block as block;
use fuchsia_sync::Mutex;
use std::collections::BTreeMap;
use std::num::NonZero;
use std::ops::Range;
use std::sync::atomic::{self, AtomicU32};

type VmoRegistry = BTreeMap<u16, zx::Vmo>;

struct Inner {
    data: Vec<u8>,
    vmo_registry: VmoRegistry,
}

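/// An in-memory implementation of [`BlockClient`] backed by a plain `Vec<u8>`, intended for
/// use in tests.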
pub struct FakeBlockClient {
    inner: Mutex<Inner>,
    block_size: u32,
    flush_count: AtomicU32,
}

impl FakeBlockClient {
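    /// Creates a fake device with `block_count` blocks of `block_size` bytes, all initially zero.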
    pub fn new(block_size: u32, block_count: usize) -> Self {
        Self {
            inner: Mutex::new(Inner {
                data: vec![0u8; block_size as usize * block_count],
                vmo_registry: BTreeMap::new(),
            }),
            block_size,
            flush_count: AtomicU32::new(0),
        }
    }

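    /// Returns the number of times the device has been flushed.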
    pub fn flush_count(&self) -> u32 {
        self.flush_count.load(atomic::Ordering::Relaxed)
    }
}

#[async_trait]
impl BlockClient for FakeBlockClient {
    async fn attach_vmo(&self, vmo: &zx::Vmo) -> Result<VmoId, zx::Status> {
        let len = vmo.get_size()?;
        let vmo = vmo.create_child(zx::VmoChildOptions::SLICE, 0, len)?;
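        // Register the child VMO under the lowest unused id; ids start at 1 so 0 is never used.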
        let mut inner = self.inner.lock();
        for id in 1..u16::MAX {
            if let std::collections::btree_map::Entry::Vacant(e) = inner.vmo_registry.entry(id) {
                e.insert(vmo);
                return Ok(VmoId::new(id));
            }
        }
        Err(zx::Status::NO_RESOURCES)
    }

    async fn detach_vmo(&self, vmo_id: VmoId) -> Result<(), zx::Status> {
        let mut inner = self.inner.lock();
        let id = vmo_id.into_id();
        if inner.vmo_registry.remove(&id).is_none() {
            Err(zx::Status::NOT_FOUND)
        } else {
            Ok(())
        }
    }

    async fn read_at_traced(
        &self,
        buffer_slice: MutableBufferSlice<'_>,
        device_offset: u64,
        _trace_flow_id: u64,
    ) -> Result<(), zx::Status> {
        if device_offset % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let device_offset = device_offset as usize;
        let inner = &mut *self.inner.lock();
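        // A `VmoId` slice copies out of the device data into the registered VMO; a `Memory`
        // slice copies straight into the caller's buffer.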
        match buffer_slice {
            MutableBufferSlice::VmoId { vmo_id, offset, length } => {
                if offset % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                if length % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                let vmo = inner.vmo_registry.get(&vmo_id.id()).ok_or(zx::Status::INVALID_ARGS)?;
                vmo.write(&inner.data[device_offset..device_offset + length as usize], offset)?;
                Ok(())
            }
            MutableBufferSlice::Memory(slice) => {
                let len = slice.len();
                if device_offset + len > inner.data.len() {
                    return Err(zx::Status::OUT_OF_RANGE);
                }
                slice.copy_from_slice(&inner.data[device_offset..device_offset + len]);
                Ok(())
            }
        }
    }

    async fn write_at_with_opts_traced(
        &self,
        buffer_slice: BufferSlice<'_>,
        device_offset: u64,
        _opts: WriteOptions,
        _trace_flow_id: u64,
    ) -> Result<(), zx::Status> {
        if device_offset % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let device_offset = device_offset as usize;
        let inner = &mut *self.inner.lock();
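        // A `VmoId` slice copies from the registered VMO into the device data; a `Memory`
        // slice copies straight from the caller's buffer.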
        match buffer_slice {
            BufferSlice::VmoId { vmo_id, offset, length } => {
                if offset % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                if length % self.block_size as u64 != 0 {
                    return Err(zx::Status::INVALID_ARGS);
                }
                let vmo = inner.vmo_registry.get(&vmo_id.id()).ok_or(zx::Status::INVALID_ARGS)?;
                vmo.read(&mut inner.data[device_offset..device_offset + length as usize], offset)?;
                Ok(())
            }
            BufferSlice::Memory(slice) => {
                let len = slice.len();
                if device_offset + len > inner.data.len() {
                    return Err(zx::Status::OUT_OF_RANGE);
                }
                inner.data[device_offset..device_offset + len].copy_from_slice(slice);
                Ok(())
            }
        }
    }

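    // Trim is emulated by filling the range with 0xab.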
    async fn trim_traced(&self, range: Range<u64>, _trace_flow_id: u64) -> Result<(), zx::Status> {
        if range.start % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        if range.end % self.block_size as u64 != 0 {
            return Err(zx::Status::INVALID_ARGS);
        }
        let inner = &mut *self.inner.lock();
        if range.end as usize > inner.data.len() {
            return Err(zx::Status::OUT_OF_RANGE);
        }
        inner.data[range.start as usize..range.end as usize].fill(0xab);
        Ok(())
    }

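    // All writes land directly in memory, so a flush only needs to be counted.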
    async fn flush_traced(&self, _trace_flow_id: u64) -> Result<(), zx::Status> {
        self.flush_count.fetch_add(1, atomic::Ordering::Relaxed);
        Ok(())
    }

    async fn close(&self) -> Result<(), zx::Status> {
        Ok(())
    }

    fn block_size(&self) -> u32 {
        self.block_size
    }

    fn block_count(&self) -> u64 {
        self.inner.lock().data.len() as u64 / self.block_size as u64
    }

    fn max_transfer_blocks(&self) -> Option<NonZero<u32>> {
        None
    }

    fn block_flags(&self) -> block::Flag {
        block::Flag::TRIM_SUPPORT
    }

    fn is_connected(&self) -> bool {
        true
    }
}
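
// A minimal usage sketch, not part of the original file: it round-trips one block through the
// fake using in-memory buffer slices. It assumes the `BlockClient` trait provides the
// non-traced convenience wrappers `write_at`, `read_at` and `flush`, and that the `futures`
// crate is available so the test can run without a Fuchsia executor.
#[cfg(test)]
mod usage_example {
    use super::*;

    #[test]
    fn write_then_read_round_trips() {
        futures::executor::block_on(async {
            // 16 blocks of 512 bytes, all initially zero.
            let client = FakeBlockClient::new(512, 16);

            // Write one block of 0xaa at the second block (device offset 512).
            let payload = vec![0xaau8; 512];
            client
                .write_at(BufferSlice::Memory(&payload[..]), 512)
                .await
                .expect("write_at failed");

            // Read it back and check the contents survived.
            let mut readback = vec![0u8; 512];
            client
                .read_at(MutableBufferSlice::Memory(&mut readback[..]), 512)
                .await
                .expect("read_at failed");
            assert_eq!(payload, readback);

            // The fake only counts flushes; there is nothing to persist.
            client.flush().await.expect("flush failed");
            assert_eq!(client.flush_count(), 1);
        });
    }
}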