use heapdump_vmo::stack_trace_compression;
use std::ffi::c_void;

use crate::{with_profiler, PerThreadData, Profiler};

const STACK_TRACE_MAXIMUM_DEPTH: usize = 64;
const STACK_TRACE_MAXIMUM_COMPRESSED_SIZE: usize =
    stack_trace_compression::max_compressed_size(STACK_TRACE_MAXIMUM_DEPTH);

extern "C" {
    fn __sanitizer_fast_backtrace(buffer: *mut u64, buffer_size: usize) -> usize;
}

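// Captures the current timestamp and the caller's backtrace, compresses the backtrace, and then
// runs `f` with the profiler, the current thread's bookkeeping data, the timestamp and the
// compressed stack trace.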
#[inline(always)]
fn with_profiler_and_call_site(
    f: impl FnOnce(&Profiler, &mut PerThreadData, zx::MonotonicInstant, &[u8]),
) {
    let timestamp = zx::MonotonicInstant::get();

    let mut stack_buf = [0; STACK_TRACE_MAXIMUM_DEPTH];
    let stack_len =
        unsafe { __sanitizer_fast_backtrace(stack_buf.as_mut_ptr(), STACK_TRACE_MAXIMUM_DEPTH) };
    let stack = &stack_buf[..stack_len];

    with_profiler(|profiler, thread_data| {
        let mut compressed_stack_buf = [0; STACK_TRACE_MAXIMUM_COMPRESSED_SIZE];
        let compressed_stack_len =
            stack_trace_compression::compress_into(stack, &mut compressed_stack_buf);
        let compressed_stack = &compressed_stack_buf[..compressed_stack_len];

        f(profiler, thread_data, timestamp, compressed_stack)
    })
}

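// Allocation hook invoked by Scudo: records the new block's address, size, call site and
// timestamp with the profiler.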
#[no_mangle]
pub extern "C" fn __scudo_allocate_hook(ptr: *mut c_void, size: usize) {
    with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
        profiler.record_allocation(
            thread_data,
            ptr as u64,
            size as u64,
            compressed_stack_trace,
            timestamp,
        );
    });
}

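// Deallocation hook invoked by Scudo: drops the profiler's record for `ptr`. Frees of the null
// pointer are ignored.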
#[no_mangle]
pub extern "C" fn __scudo_deallocate_hook(ptr: *mut c_void) {
    with_profiler(|profiler, thread_data| {
        if !ptr.is_null() {
            profiler.forget_allocation(thread_data, ptr as u64);
        }
    });
}

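// Realloc is reported through a pair of hooks. This deallocate half is intentionally a no-op:
// __scudo_realloc_allocate_hook below performs all of the bookkeeping, both for in-place resizes
// and for blocks that moved.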
#[no_mangle]
pub extern "C" fn __scudo_realloc_deallocate_hook(_old_ptr: *mut c_void) {}

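// Allocation half of the realloc hook pair: an in-place resize only updates the existing record's
// size, call site and timestamp; a moved block is recorded at its new address and the old address
// is forgotten.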
#[no_mangle]
pub extern "C" fn __scudo_realloc_allocate_hook(
    old_ptr: *mut c_void,
    new_ptr: *mut c_void,
    size: usize,
) {
    with_profiler_and_call_site(|profiler, thread_data, timestamp, compressed_stack_trace| {
        if old_ptr == new_ptr {
            profiler.update_allocation(
                thread_data,
                old_ptr as u64,
                size as u64,
                compressed_stack_trace,
                timestamp,
            );
        } else {
            profiler.record_allocation(
                thread_data,
                new_ptr as u64,
                size as u64,
                compressed_stack_trace,
                timestamp,
            );
            profiler.forget_allocation(thread_data, old_ptr as u64);
        }
    });
}