// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Type-safe bindings for Zircon iobuffer objects.

use crate::{ok, sys, AsHandleRef, Handle, HandleBased, HandleRef, Status};
use bitflags::bitflags;

mod io_slice;
pub use self::io_slice::*;

// TODO(https://fxbug.dev/389788832): Point this at the right place when the documentation is fixed.
/// An object representing a Zircon
/// [IOBuffer](https://fuchsia.dev/reference/syscalls/iob_create).
///
/// As essentially a subtype of `Handle`, it can be freely interconverted.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
pub struct Iob(Handle);
impl_handle_based!(Iob);

/// Options for `Iob::create`.
#[derive(Default)]
pub struct IobOptions;

/// The type of an IOBuffer region: either private to the IOBuffer or backed by a shared region.
#[derive(Clone, Copy)]
pub enum IobRegionType<'a> {
    Private { size: u64, options: IobRegionPrivateOptions },
    Shared { options: vdso_next::IobRegionSharedOptions, region: &'a vdso_next::IobSharedRegion },
}

impl IobRegionType<'_> {
    fn to_raw(&self) -> (sys::zx_iob_region_type_t, sys::zx_iob_region_extension_t) {
        match self {
            IobRegionType::Private { .. } => (
                sys::ZX_IOB_REGION_TYPE_PRIVATE,
                sys::zx_iob_region_extension_t { private_region: Default::default() },
            ),
            IobRegionType::Shared { region, .. } => (
                sys::ZX_IOB_REGION_TYPE_SHARED,
                sys::zx_iob_region_extension_t {
                    shared_region: sys::zx_iob_region_shared_t {
                        options: 0,
                        shared_region: region.raw_handle(),
                        padding: Default::default(),
                    },
                },
            ),
        }
    }
}

/// Options for `IobRegionType::Private`.
#[derive(Clone, Copy, Default)]
pub struct IobRegionPrivateOptions;

/// Describes a region to be created by `Iob::create`.
pub struct IobRegion<'a> {
    pub region_type: IobRegionType<'a>,
    pub access: IobAccess,
    pub discipline: IobDiscipline,
}

bitflags! {
    /// Access controls for the two endpoints of an IOBuffer region.
    #[repr(transparent)]
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct IobAccess: u32 {
        const EP0_CAN_MAP_READ = sys::ZX_IOB_ACCESS_EP0_CAN_MAP_READ;
        const EP0_CAN_MAP_WRITE = sys::ZX_IOB_ACCESS_EP0_CAN_MAP_WRITE;
        const EP0_CAN_MEDIATED_READ = sys::ZX_IOB_ACCESS_EP0_CAN_MEDIATED_READ;
        const EP0_CAN_MEDIATED_WRITE = sys::ZX_IOB_ACCESS_EP0_CAN_MEDIATED_WRITE;
        const EP1_CAN_MAP_READ = sys::ZX_IOB_ACCESS_EP1_CAN_MAP_READ;
        const EP1_CAN_MAP_WRITE = sys::ZX_IOB_ACCESS_EP1_CAN_MAP_WRITE;
        const EP1_CAN_MEDIATED_READ = sys::ZX_IOB_ACCESS_EP1_CAN_MEDIATED_READ;
        const EP1_CAN_MEDIATED_WRITE = sys::ZX_IOB_ACCESS_EP1_CAN_MEDIATED_WRITE;
    }
}

/// The memory access discipline applied to an IOBuffer region.
#[derive(Clone, Copy)]
pub enum IobDiscipline {
    None,
    MediatedWriteRingBuffer { tag: u64 },
}

impl IobDiscipline {
    fn to_raw(&self) -> sys::zx_iob_discipline_t {
        match self {
            IobDiscipline::None => sys::zx_iob_discipline_t {
                r#type: sys::ZX_IOB_DISCIPLINE_TYPE_NONE,
                extension: sys::zx_iob_discipline_extension_t {
                    reserved: [sys::PadByte::default(); 64],
                },
            },
            IobDiscipline::MediatedWriteRingBuffer { tag } => sys::zx_iob_discipline_t {
                r#type: sys::ZX_IOB_DISCIPLINE_TYPE_MEDIATED_WRITE_RING_BUFFER,
                extension: sys::zx_iob_discipline_extension_t {
                    ring_buffer: sys::zx_iob_discipline_mediated_write_ring_buffer_t {
                        tag: *tag,
                        padding: [sys::PadByte::default(); 56],
                    },
                },
            },
        }
    }
}

/// Options for `Iob::write` and `Iob::writev`.
#[derive(Default)]
pub struct IobWriteOptions;

impl Iob {
    /// Creates an IOBuffer.
    ///
    /// Wraps the [zx_iob_create](https://fuchsia.dev/reference/syscalls/iob_create) syscall.
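    ///
    /// # Example
    ///
    /// An illustrative sketch (not from the upstream documentation), assuming the crate is used
    /// under the name `zx`; it creates an IOBuffer with a single private region that endpoint 0
    /// can map for reading and writing.
    ///
    /// ```no_run
    /// use zx::{Iob, IobAccess, IobDiscipline, IobRegion, IobRegionType};
    ///
    /// let (_ep0, _ep1) = Iob::create(
    ///     Default::default(),
    ///     &[IobRegion {
    ///         region_type: IobRegionType::Private {
    ///             size: 4096,
    ///             options: Default::default(),
    ///         },
    ///         access: IobAccess::EP0_CAN_MAP_READ | IobAccess::EP0_CAN_MAP_WRITE,
    ///         discipline: IobDiscipline::None,
    ///     }],
    /// )?;
    /// # Ok::<(), zx::Status>(())
    /// ```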
    pub fn create(_options: IobOptions, regions: &[IobRegion<'_>]) -> Result<(Iob, Iob), Status> {
        let raw_regions: Vec<_> = regions
            .iter()
            .map(|r| {
                let (r#type, extension) = r.region_type.to_raw();
                sys::zx_iob_region_t {
                    r#type,
                    access: r.access.bits(),
                    size: match &r.region_type {
                        IobRegionType::Private { size, .. } => *size,
                        IobRegionType::Shared { .. } => 0,
                    },
                    discipline: r.discipline.to_raw(),
                    extension,
                }
            })
            .collect();
        let mut handle1 = 0;
        let mut handle2 = 0;
        // SAFETY: The pointer and length describe the `raw_regions` slice, and the handle
        // out-pointers are valid for writes.
        let status = unsafe {
            sys::zx_iob_create(
                0,
                raw_regions.as_ptr() as *const u8,
                raw_regions.len(),
                &mut handle1,
                &mut handle2,
            )
        };
        ok(status)?;
        // SAFETY: On success the syscall returns two valid handles that we now own.
        unsafe { Ok((Iob::from(Handle::from_raw(handle1)), Iob::from(Handle::from_raw(handle2)))) }
    }

    /// Performs a mediated write to an IOBuffer region.
    ///
    /// Wraps the [zx_iob_writev](https://fuchsia.dev/reference/syscalls/iob_writev) syscall.
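    ///
    /// # Example
    ///
    /// An illustrative sketch (not from the upstream documentation), assuming the crate is used
    /// under the name `zx` and that `ep` is an endpoint with mediated write access to region 0.
    ///
    /// ```no_run
    /// # fn example(ep: &zx::Iob) -> Result<(), zx::Status> {
    /// ep.write(Default::default(), 0, b"hello")?;
    /// # Ok(())
    /// # }
    /// ```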
    pub fn write(
        &self,
        options: IobWriteOptions,
        region_index: u32,
        data: &[u8],
    ) -> Result<(), Status> {
        self.writev(options, region_index, &[IobIoSlice::new(data)])
    }

    /// Performs a vectored mediated write to an IOBuffer region.
    ///
    /// Wraps the [zx_iob_writev](https://fuchsia.dev/reference/syscalls/iob_writev) syscall.
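    ///
    /// # Example
    ///
    /// An illustrative sketch (not from the upstream documentation), assuming the crate is used
    /// under the name `zx` and that `ep` is an endpoint with mediated write access to region 0.
    ///
    /// ```no_run
    /// # fn example(ep: &zx::Iob) -> Result<(), zx::Status> {
    /// use zx::IobIoSlice;
    ///
    /// let slices = [IobIoSlice::new(b"hello, "), IobIoSlice::new(b"world")];
    /// ep.writev(Default::default(), 0, &slices)?;
    /// # Ok(())
    /// # }
    /// ```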
    pub fn writev(
        &self,
        _options: IobWriteOptions,
        region_index: u32,
        iovecs: &[IobIoSlice<'_>],
    ) -> Result<(), Status> {
        // SAFETY: The pointer and length describe the `iovecs` slice; the cast relies on
        // `IobIoSlice` sharing its layout with `zx_iovec_t`.
        let status = unsafe {
            sys::zx_iob_writev(
                self.raw_handle(),
                0,
                region_index,
                iovecs.as_ptr().cast::<sys::zx_iovec_t>(),
                iovecs.len(),
            )
        };
        ok(status)?;
        Ok(())
    }
}

pub(crate) mod vdso_next {
    use super::*;

    use std::sync::OnceLock;

    /// An object representing a Zircon shared IOBuffer region.
    ///
    /// As essentially a subtype of `Handle`, it can be freely interconverted.
    #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
    #[repr(transparent)]
    pub struct IobSharedRegion(Handle);
    impl_handle_based!(IobSharedRegion);

    /// Options for `IobSharedRegion::create`.
    #[derive(Default)]
    pub struct IobSharedRegionOptions;

    /// Options for `IobRegionType::Shared`, which is used by `Iob::create`.
    #[derive(Clone, Copy, Default)]
    pub struct IobRegionSharedOptions;

    impl IobSharedRegion {
        /// Creates a shared region that can be used with multiple IOBuffer objects.
        ///
        /// Wraps the `zx_iob_create_shared_region` syscall.
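        ///
        /// # Example
        ///
        /// An illustrative sketch (not from the upstream documentation); the call panics if the
        /// `zx_iob_create_shared_region` symbol is not present in the vDSO.
        ///
        /// ```ignore
        /// let region = IobSharedRegion::create(Default::default(), 64 * 1024)?;
        /// ```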
        pub fn create(_options: IobSharedRegionOptions, size: u64) -> Result<Self, Status> {
            // We have to go through this dance because we now have shared libraries (e.g. the VFS
            // library), which means we would encounter a relocation failure if this were linked
            // against the stable vdso. Weak link attributes are experimental, so we search for
            // the symbol dynamically.
            static ZX_IOB_CREATE_SHARED_REGION_FN: OnceLock<
                unsafe extern "C" fn(u64, u64, *mut sys::zx_handle_t) -> sys::zx_status_t,
            > = OnceLock::new();

            let zx_iob_create_shared_region = ZX_IOB_CREATE_SHARED_REGION_FN.get_or_init(|| {
                // SAFETY: These arguments should be safe to pass to dlsym.
                let symbol = unsafe {
                    libc::dlsym(libc::RTLD_DEFAULT, c"zx_iob_create_shared_region".as_ptr())
                };
                assert!(!symbol.is_null(), "zx_iob_create_shared_region requires vdso next");
                // SAFETY: The above signature should be correct for the symbol we found.
                unsafe { std::mem::transmute(symbol) }
            });

            let mut handle = 0;
            let status = unsafe { zx_iob_create_shared_region(0, size, &mut handle) };
            ok(status)?;
            Ok(Self::from(unsafe { Handle::from_raw(handle) }))
        }
    }

    #[cfg(all(test, vdso_next))]
    mod tests {
        use crate::handle::AsHandleRef;
        use crate::{
            system_get_page_size, Iob, IobAccess, IobDiscipline, IobRegion, IobRegionType,
            IobSharedRegion, Unowned, Vmar, VmarFlags,
        };
        use std::sync::atomic::{AtomicU64, Ordering};

        #[test]
        fn test() {
            let region_size = 2 * system_get_page_size() as usize;

            let shared_region =
                IobSharedRegion::create(Default::default(), region_size as u64).unwrap();

            let (ep0, ep1) = Iob::create(
                Default::default(),
                &[IobRegion {
                    region_type: IobRegionType::Shared {
                        options: Default::default(),
                        region: &shared_region,
                    },
                    access: IobAccess::EP0_CAN_MAP_READ
                        | IobAccess::EP0_CAN_MAP_WRITE
                        | IobAccess::EP1_CAN_MEDIATED_WRITE,
                    discipline: IobDiscipline::MediatedWriteRingBuffer { tag: 0 },
                }],
            )
            .unwrap();

            ep1.write(Default::default(), 0, b"hello").unwrap();

            let vmar_handle = unsafe { fuchsia_runtime::zx_vmar_root_self() };
            let vmar = unsafe { Unowned::<Vmar>::from_raw_handle(vmar_handle) };
            let addr = vmar
                .map_iob(VmarFlags::PERM_READ | VmarFlags::PERM_WRITE, 0, &ep0, 0, 0, region_size)
                .unwrap();

            #[repr(C)]
            struct Header {
                head: AtomicU64,
                tail: AtomicU64,
            }

            let header = unsafe { &*(addr as *const Header) };

            let head = header.head.load(Ordering::Acquire);
            assert_eq!(head, 24);
            let tail = header.tail.load(Ordering::Relaxed);
            assert_eq!(tail, 0);

            #[repr(C)]
            struct Message {
                tag: u64,
                length: u64,
                data: [u8; 8],
            }

            let message =
                unsafe { &(*((addr + system_get_page_size() as usize) as *const Message)) };

            assert_eq!(message.tag, ep1.get_koid().unwrap().raw_koid());
            assert_eq!(message.length, 5);
            assert_eq!(&message.data[..5], b"hello");
        }
    }
}

#[cfg(test)]
mod tests {
    use super::{Iob, IobAccess, IobDiscipline, IobRegion, IobRegionType};
    use crate::{Unowned, Vmar, VmarFlags};
    use std::sync::atomic::{AtomicU64, Ordering};

    #[test]
    fn test_create_iob() {
        let region_size = crate::system_get_page_size() as usize * 8;
        let (ep0, ep1) = Iob::create(
            Default::default(),
            &[IobRegion {
                region_type: IobRegionType::Private {
                    size: region_size as u64,
                    options: Default::default(),
                },
                access: IobAccess::EP0_CAN_MAP_READ
                    | IobAccess::EP0_CAN_MAP_WRITE
                    | IobAccess::EP1_CAN_MAP_READ,
                discipline: IobDiscipline::None,
            }],
        )
        .expect("create failed");

        // We can't use fuchsia_runtime other than to get the handle, because its Vmar type is
        // considered distinct from crate::Vmar.
        let root_vmar =
            unsafe { Unowned::<Vmar>::from_raw_handle(fuchsia_runtime::zx_vmar_root_self()) };

        let write_addr = root_vmar
            .map_iob(VmarFlags::PERM_READ | VmarFlags::PERM_WRITE, 0, &ep0, 0, 0, region_size)
            .expect("map_iob failed");
        let read_addr = root_vmar
            .map_iob(VmarFlags::PERM_READ, 0, &ep1, 0, 0, region_size)
            .expect("map_iob failed");

        const VALUE: u64 = 0x123456789abcdef;

        unsafe { &*(write_addr as *const AtomicU64) }.store(VALUE, Ordering::Relaxed);

        assert_eq!(unsafe { &*(read_addr as *const AtomicU64) }.load(Ordering::Relaxed), VALUE);

        unsafe {
            root_vmar.unmap(write_addr, region_size).expect("unmap failed");
            root_vmar.unmap(read_addr, region_size).expect("unmap failed");
        }
    }
}