// heapdump_vmo/resources_table_v1.rs

use std::alloc::Layout;
use std::mem::{align_of, size_of, size_of_val};

use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};
10type Offset = u32;
12const OFFSET_INVALID: Offset = Offset::MAX;
13
14type StackTraceLength = u16;
17
18const NUM_STACK_BUCKETS: usize = 1 << 13;
24type StackBucketHeads = [Offset; NUM_STACK_BUCKETS];
25
26#[derive(Clone, Copy, Eq, Debug, Hash, Ord, PartialEq, PartialOrd)]
28#[repr(transparent)]
29pub struct ResourceKey(Offset);
30
31impl ResourceKey {
32 #[cfg(test)]
34 pub(crate) const fn from_raw(offset: Offset) -> ResourceKey {
35 ResourceKey(offset)
36 }
37
38 pub const fn into_raw(self) -> Offset {
39 self.0
40 }
41}
42
43#[repr(C)]
44#[derive(Debug)]
45pub struct ThreadInfo {
46 pub koid: zx::sys::zx_koid_t,
47 pub name: zx::Name,
48}
49
50unsafe impl MemoryMappable for ThreadInfo {}
52
53pub struct ResourcesTableWriter {
72 storage: MemoryMappedVmo,
73 watermark: usize, }
75
76impl ResourcesTableWriter {
77 pub fn new(vmo: &zx::Vmo) -> Result<ResourcesTableWriter, crate::Error> {
82 let storage = MemoryMappedVmo::new_readwrite(vmo)?;
83 if storage.vmo_size() < size_of::<StackBucketHeads>() {
84 return Err(crate::Error::BufferTooSmall);
85 } else if storage.vmo_size() - 1 > Offset::MAX as usize {
86 return Err(crate::Error::BufferTooBig);
87 }
88
89 let mut result = ResourcesTableWriter { storage, watermark: size_of::<StackBucketHeads>() };
90
91 for bucket_index in 0..NUM_STACK_BUCKETS {
93 *result.stack_bucket_head_at(bucket_index) = OFFSET_INVALID;
94 }
95
96 Ok(result)
97 }
98
99 fn allocate(&mut self, layout: Layout) -> Result<Offset, crate::Error> {
101 if layout.align() > zx::system_get_page_size() as usize {
104 return Err(crate::Error::InvalidInput);
105 }
106
107 let result_start = (self.watermark + layout.align() - 1) & !(layout.align() - 1);
108 let result_end = result_start + layout.size();
109
110 if result_end <= self.storage.vmo_size() {
111 self.watermark = result_end;
112 Ok(result_start as Offset)
113 } else {
114 Err(crate::Error::OutOfSpace)
115 }
116 }
117
118 fn compute_bucket_index(compressed_stack_trace: &[u8]) -> usize {
120 let tmp = crc::crc32::checksum_ieee(compressed_stack_trace);
121 tmp as usize % NUM_STACK_BUCKETS
122 }
123
124 fn stack_bucket_head_at(&mut self, bucket_index: usize) -> &mut Offset {
126 let bucket_heads = self.storage.get_object_mut::<StackBucketHeads>(0).unwrap();
128 &mut bucket_heads[bucket_index]
129 }
130
131 fn find_in_bucket(
134 &mut self,
135 bucket_index: usize,
136 compressed_stack_trace: &[u8],
137 ) -> Option<Offset> {
138 let mut curr = *self.stack_bucket_head_at(bucket_index);
139 while curr != OFFSET_INVALID {
140 let curr_next: Offset = *self.storage.get_object(curr as usize).unwrap();
143 let payload_offset = curr as usize + size_of_val(&curr_next);
144 let curr_payload = get_compressed_stack_trace(&self.storage, payload_offset).unwrap();
145
146 if *curr_payload == *compressed_stack_trace {
148 return Some(curr);
149 }
150
151 curr = curr_next;
152 }
153
154 None
156 }
157
158 fn insert_in_bucket(
159 &mut self,
160 bucket_index: usize,
161 compressed_stack_trace: &[u8],
162 ) -> Result<Offset, crate::Error> {
163 let alloc_bytes =
168 size_of::<Offset>() + size_of::<StackTraceLength>() + compressed_stack_trace.len();
169 let alloc_align = align_of::<Offset>();
170 let new = self.allocate(Layout::from_size_align(alloc_bytes, alloc_align).unwrap())?;
171
172 let old_head = *self.stack_bucket_head_at(bucket_index);
173
174 *self.storage.get_object_mut(new as usize).unwrap() = old_head;
176 set_compressed_stack_trace(
177 &mut self.storage,
178 new as usize + size_of::<Offset>(),
179 compressed_stack_trace,
180 )
181 .unwrap();
182
183 *self.stack_bucket_head_at(bucket_index) = new;
185
186 Ok(new)
187 }
188
189 pub fn intern_compressed_stack_trace(
194 &mut self,
195 compressed_stack_trace: &[u8],
196 ) -> Result<(ResourceKey, bool), crate::Error> {
197 if compressed_stack_trace.len() > StackTraceLength::MAX as usize {
199 return Err(crate::Error::BufferTooBig);
200 }
201
202 let bucket_index = Self::compute_bucket_index(compressed_stack_trace);
204 let (offset, inserted) = match self.find_in_bucket(bucket_index, compressed_stack_trace) {
205 Some(offset) => (offset, false),
206 None => (self.insert_in_bucket(bucket_index, compressed_stack_trace)?, true),
207 };
208
209 let resource_key = ResourceKey(offset + size_of::<Offset>() as Offset);
213 Ok((resource_key, inserted))
214 }
215
216 pub fn insert_thread_info(
218 &mut self,
219 koid: zx::sys::zx_koid_t,
220 name: &zx::Name,
221 ) -> Result<ResourceKey, crate::Error> {
222 let offset = self.allocate(Layout::new::<ThreadInfo>())?;
223 *self.storage.get_object_mut(offset as usize).unwrap() = ThreadInfo { koid, name: *name };
224 Ok(ResourceKey(offset))
225 }
226}
227
228pub struct ResourcesTableReader {
230 storage: MemoryMappedVmo,
231}
232
233impl ResourcesTableReader {
234 pub fn new(vmo: &zx::Vmo) -> Result<ResourcesTableReader, crate::Error> {
235 let storage = MemoryMappedVmo::new_readonly(vmo)?;
236 Ok(ResourcesTableReader { storage })
237 }
238
239 pub fn get_compressed_stack_trace(
241 &self,
242 resource_key: ResourceKey,
243 ) -> Result<&[u8], crate::Error> {
244 let ResourceKey(offset) = resource_key;
245 get_compressed_stack_trace(&self.storage, offset as usize)
246 }
247
248 pub fn get_thread_info(&self, resource_key: ResourceKey) -> Result<&ThreadInfo, crate::Error> {
250 let ResourceKey(offset) = resource_key;
251 Ok(self.storage.get_object(offset as usize)?)
252 }
253}
254
255fn get_compressed_stack_trace(
256 storage: &MemoryMappedVmo,
257 byte_offset: usize,
258) -> Result<&[u8], crate::Error> {
259 let header: StackTraceLength = *storage.get_object(byte_offset)?;
261
262 Ok(storage.get_slice(byte_offset + size_of_val(&header), header as usize)?)
264}
265
266fn set_compressed_stack_trace(
267 storage: &mut MemoryMappedVmo,
268 byte_offset: usize,
269 compressed_stack_trace: &[u8],
270) -> Result<(), crate::Error> {
271 let header: StackTraceLength =
272 compressed_stack_trace.len().try_into().map_err(|_| crate::Error::BufferTooBig)?;
273
274 *storage.get_object_mut(byte_offset)? = header;
276
277 storage
279 .get_slice_mut(byte_offset + size_of_val(&header), compressed_stack_trace.len())?
280 .copy_from_slice(compressed_stack_trace);
281
282 Ok(())
283}
284
285#[cfg(test)]
286mod tests {
287 use super::*;
288 use assert_matches::assert_matches;
289
290 const VMO_SIZE: usize = 4 * 1024 * 1024; struct TestStorage {
294 vmo: zx::Vmo,
295 }
296
297 impl TestStorage {
298 pub fn new(vmo_size: usize) -> TestStorage {
299 let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
300 TestStorage { vmo }
301 }
302
303 fn create_writer(&self) -> ResourcesTableWriter {
304 ResourcesTableWriter::new(&self.vmo).unwrap()
305 }
306
307 fn create_reader(&self) -> ResourcesTableReader {
308 ResourcesTableReader::new(&self.vmo).unwrap()
309 }
310 }
311
312 #[test]
313 fn test_stack_trace_deduplication() {
314 let storage = TestStorage::new(VMO_SIZE);
315 let mut writer = storage.create_writer();
316
317 const COUNT: usize = 2 * NUM_STACK_BUCKETS + 1;
321 let mut pairs = Vec::new();
322 for i in 0..COUNT {
323 let stack_trace = i.to_ne_bytes();
325
326 let (resource_key, inserted) =
327 writer.intern_compressed_stack_trace(&stack_trace).unwrap();
328 assert!(inserted, "expected true because the stack trace was not present");
329
330 pairs.push((stack_trace, resource_key));
331 }
332
333 for (stack_trace, expected_resource_key) in &pairs {
335 let (actual_resource_key, inserted) =
336 writer.intern_compressed_stack_trace(stack_trace).unwrap();
337 assert!(!inserted, "expected false because the stack trace is already present");
338 assert_eq!(actual_resource_key, *expected_resource_key);
339 }
340
341 let reader = storage.create_reader();
343 for (expected_stack_trace, resource_key) in &pairs {
344 let actual_stack_trace = reader.get_compressed_stack_trace(*resource_key).unwrap();
345 assert_eq!(actual_stack_trace, *expected_stack_trace);
346 }
347 }
348
349 #[test]
350 fn test_empty_stack_trace() {
351 let storage = TestStorage::new(VMO_SIZE);
352 let mut writer = storage.create_writer();
353
354 let (resource_key, inserted) = writer.intern_compressed_stack_trace(&[]).unwrap();
356 assert!(inserted);
357
358 let reader = storage.create_reader();
360 let read_result = reader.get_compressed_stack_trace(resource_key).unwrap();
361 assert_eq!(read_result, []);
362 }
363
364 #[test]
365 fn test_long_stack_traces() {
366 let storage = TestStorage::new(VMO_SIZE);
367 let mut writer = storage.create_writer();
368
369 let stack_trace_too_long = vec![0xAA; u16::MAX as usize + 1];
372 let result = writer.intern_compressed_stack_trace(&stack_trace_too_long);
373 assert_matches!(result, Err(crate::Error::BufferTooBig));
374
375 let stack_trace_max_len = vec![0x55; u16::MAX as usize];
377 let (resource_key, _) = writer.intern_compressed_stack_trace(&stack_trace_max_len).unwrap();
378
379 let reader = storage.create_reader();
381 let read_result = reader.get_compressed_stack_trace(resource_key).unwrap();
382 assert_eq!(read_result, stack_trace_max_len);
383 }
384
385 #[test]
386 fn test_write_until_out_of_space() {
387 let storage = TestStorage::new(VMO_SIZE);
388 let mut writer = storage.create_writer();
389
390 for i in 0..=VMO_SIZE {
395 let stack_trace = i.to_ne_bytes();
397
398 if let Err(crate::Error::OutOfSpace) =
399 writer.intern_compressed_stack_trace(&stack_trace)
400 {
401 return; }
403 }
404
405 unreachable!("Inserted more than {} distinct stack traces", VMO_SIZE);
406 }
407
408 #[test]
409 fn test_thread_info() {
410 let storage = TestStorage::new(VMO_SIZE);
411 let mut writer = storage.create_writer();
412
413 const FAKE_KOID: zx::sys::zx_koid_t = 1234;
416 const FAKE_NAME: zx::Name = zx::Name::new_lossy("fake-name");
417 let resource_key = writer.insert_thread_info(FAKE_KOID, &FAKE_NAME).unwrap();
418
419 let reader = storage.create_reader();
421 let thread_info = reader.get_thread_info(resource_key).unwrap();
422 assert_eq!(thread_info.koid, FAKE_KOID);
423 assert_eq!(thread_info.name, FAKE_NAME);
424 }
425}