// fidl_fuchsia_sysmem2/fidl_fuchsia_sysmem2.rs
1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_sysmem2__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request table for `Allocator.AllocateNonSharedCollection`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateNonSharedCollectionRequest {
    /// The server end of the [`BufferCollection`] channel (see the
    /// `AllocateNonSharedCollection` method docs below).
    pub collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
}
25
/// Request table for `Allocator.AllocateSharedCollection`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateSharedCollectionRequest {
    /// Server end of a [`BufferCollectionToken`] channel; the client keeps the
    /// client end as the root token.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
}
37
/// Request table for `Allocator.BindSharedCollection`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorBindSharedCollectionRequest {
    /// Client end of the [`BufferCollectionToken`] being "turned in" (see the
    /// `BindSharedCollection` method docs below).
    pub token: Option<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
    /// Server end of the [`BufferCollection`] channel; the sender retains the
    /// client end.
    pub buffer_collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
}
50
/// Request table for `Allocator.GetVmoInfo`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoRequest {
    /// `vmo` is required to be set; ownership is transferred to the server
    /// so in most cases a client will duplicate a handle and transfer the
    /// duplicate via this field.
    pub vmo: Option<fidl::Vmo>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
}
65
/// Response table for `Allocator.GetVmoInfo`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoResponse {
    /// Buffer collection ID; unique per logical buffer collection per boot
    /// (see the `GetVmoInfo` method docs below).
    pub buffer_collection_id: Option<u64>,
    /// Index of the buffer within the buffer collection; the same as the index
    /// of the buffer within [`BufferCollectionInfo`]'s `buffers`.
    pub buffer_index: Option<u64>,
    /// Set iff the queried `vmo` is a weak sysmem VMO; signals
    /// `ZX_EVENTPAIR_PEER_CLOSED` when weak handles must be closed asap
    /// (see the `GetVmoInfo` method docs below).
    pub close_weak_asap: Option<fidl::EventPair>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
}
79
/// Request table for `BufferCollection.AttachLifetimeTracking`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachLifetimeTrackingRequest {
    /// Eventpair end used for lifetime tracking — NOTE(review): exact signal
    /// semantics not visible here; confirm against the FIDL protocol docs.
    pub server_end: Option<fidl::EventPair>,
    /// Buffer-count threshold associated with `server_end` — NOTE(review):
    /// confirm exact semantics against the FIDL protocol docs.
    pub buffers_remaining: Option<u32>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
}
92
/// Request table for `BufferCollection.AttachToken`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachTokenRequest {
    /// Rights attenuation mask for the attached token — NOTE(review): default
    /// when unset not visible here; confirm against the FIDL protocol docs.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the new [`BufferCollectionToken`] channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
}
105
/// Information about a buffer collection and its buffers.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionInfo {
    /// These settings apply to all the buffers in the initial buffer
    /// allocation.
    ///
    /// This field will always be set by sysmem.
    pub settings: Option<SingleBufferSettings>,
    /// VMO handles (and vmo_usable_start offset) for each buffer in the
    /// collection.
    ///
    /// The size of this vector is the buffer_count (buffer_count is not sent
    /// separately).
    ///
    /// All buffer VMO handles have identical size and access rights. The size
    /// is in settings.buffer_settings.size_bytes.
    ///
    /// The VMO access rights are determined based on the usages which the
    /// client specified when allocating the buffer collection. For example, a
    /// client which expressed a read-only usage will receive VMOs without write
    /// rights. In addition, the rights can be attenuated by the parameter to
    /// BufferCollectionToken.Duplicate() calls.
    ///
    /// This field will always have VmoBuffer(s) in it, even if the participant
    /// specifies usage which does not require VMO handles. This permits such a
    /// participant to know the vmo_usable_start values, in case that's of any
    /// use to the participant.
    ///
    /// This field will always be set by sysmem, even if the participant doesn't
    /// specify any buffer usage (but the [`fuchsia.sysmem2/VmoBuffer.vmo`]
    /// sub-field within this field won't be set in that case).
    pub buffers: Option<Vec<VmoBuffer>>,
    /// This number is unique among all logical buffer collections per boot.
    ///
    /// This ID number will be the same for all BufferCollectionToken(s),
    /// BufferCollection(s), and BufferCollectionTokenGroup(s) associated with
    /// the same logical buffer collection (derived from the same root token
    /// created with fuchsia.sysmem2.Allocator.CreateSharedCollection, or with
    /// CreateNonSharedCollection).
    ///
    /// The same ID can be retrieved from a BufferCollectionToken,
    /// BufferCollection, or BufferCollectionTokenGroup using
    /// GetBufferCollectionId (at the cost of a round-trip to sysmem and back).
    ///
    /// This field will always be set by sysmem.
    pub buffer_collection_id: Option<u64>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for BufferCollectionInfo {}
157
/// Request table for `BufferCollection.SetConstraints`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionSetConstraintsRequest {
    /// The participant's constraints on the buffer collection — see
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] for semantics.
    pub constraints: Option<BufferCollectionConstraints>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
}
169
/// Request table for `BufferCollectionToken.CreateBufferCollectionTokenGroup`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
    /// Server end of the new [`BufferCollectionTokenGroup`] channel.
    pub group_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
}
181
/// Request table for `BufferCollectionToken.Duplicate`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateRequest {
    /// Rights attenuation mask for the duplicated token — NOTE(review): default
    /// when unset not visible here; confirm against the FIDL protocol docs.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    /// Server end of the duplicated [`BufferCollectionToken`] channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
}
194
/// Request table for `BufferCollectionTokenGroup.CreateChild`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildRequest {
    /// Must be set.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    /// If not set, the default is `ZX_RIGHT_SAME_RIGHTS`.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
}
209
/// Response table for `BufferCollectionTokenGroup.CreateChildrenSync`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponse {
    /// Client ends of the newly created child [`BufferCollectionToken`]s.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
}
221
/// Response table for `BufferCollectionToken.DuplicateSync`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateSyncResponse {
    /// Client ends of the duplicated [`BufferCollectionToken`]s.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
}
233
/// Response table for `BufferCollection.WaitForAllBuffersAllocated`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponse {
    /// The allocated collection's settings and buffers; see
    /// [`BufferCollectionInfo`].
    pub buffer_collection_info: Option<BufferCollectionInfo>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
}
245
/// Request table for `Node.AttachNodeTracking`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeAttachNodeTrackingRequest {
    /// This field must be set. This eventpair end will be closed after the
    /// `Node` is closed or failed and the node's buffer counts are no
    /// longer in effect in the logical buffer collection.
    pub server_end: Option<fidl::EventPair>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
}
260
/// Request table for `Node.IsAlternateFor`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeIsAlternateForRequest {
    /// A node reference event — presumably obtained via `Node.GetNodeRef`
    /// (see [`NodeGetNodeRefResponse`]); confirm against the FIDL docs.
    pub node_ref: Option<fidl::Event>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeIsAlternateForRequest {}
269
/// Request table for `Node.SetWeakOk`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeSetWeakOkRequest {
    /// Whether the setting also applies to child nodes — NOTE(review): exact
    /// propagation semantics not visible here; confirm against the FIDL docs.
    pub for_child_nodes_also: Option<bool>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeSetWeakOkRequest {}
278
/// Response table for `Node.GetNodeRef`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeGetNodeRefResponse {
    /// Event identifying this node; usable with `Node.IsAlternateFor`
    /// (see [`NodeIsAlternateForRequest`]).
    pub node_ref: Option<fidl::Event>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeGetNodeRefResponse {}
287
/// Per-buffer information within a [`BufferCollectionInfo`]: the VMO handle
/// (when provided), the usable start offset within it, and an optional
/// weak-lifetime eventpair.
#[derive(Debug, Default, PartialEq)]
pub struct VmoBuffer {
    /// `vmo` can be un-set if a participant has only
    /// [`fuchsia.sysmem2/BufferUsage.none`] set to `NONE_USAGE` (explicitly or
    /// implicitly by [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    /// without `constraints` set).
    pub vmo: Option<fidl::Vmo>,
    /// Offset within the VMO of the first usable byte. Must be < the VMO's size
    /// in bytes, and leave sufficient room for BufferMemorySettings.size_bytes
    /// before the end of the VMO.
    ///
    /// Currently sysmem will always set this field to 0, and in future, sysmem
    /// won't set this field to a non-zero value unless all participants have
    /// explicitly indicated support for non-zero vmo_usable_start (this
    /// mechanism does not exist as of this comment). A participant that hasn't
    /// explicitly indicated support for non-zero vmo_usable_start (all current
    /// clients) should implicitly assume this field is set to 0 without
    /// actually checking this field.
    pub vmo_usable_start: Option<u64>,
    /// This field is set iff `vmo` is a sysmem weak VMO handle. The client must
    /// keep `close_weak_asap` around for as long as `vmo`, and must notice
    /// `ZX_EVENTPAIR_PEER_CLOSED`. If that signal occurs, the client must close
    /// `vmo` asap. Not doing so is considered a VMO leak by the client and in
    /// that case sysmem will eventually complain loudly via syslog (currently
    /// 5s later).
    pub close_weak_asap: Option<fidl::EventPair>,
    // Generated marker field present on every flexible table in this file.
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker-trait impl (`fidl::Standalone`) for the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {}
319
/// Zero-sized protocol marker for `fuchsia.sysmem2.Allocator`; ties together
/// the proxy, request-stream, and synchronous-proxy types below.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AllocatorMarker;

impl fidl::endpoints::ProtocolMarker for AllocatorMarker {
    type Proxy = AllocatorProxy;
    type RequestStream = AllocatorRequestStream;
    // The synchronous proxy only exists when targeting Fuchsia itself.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = AllocatorSynchronousProxy;

    // Fully-qualified protocol name, used for debugging/diagnostics.
    const DEBUG_NAME: &'static str = "fuchsia.sysmem2.Allocator";
}
// Marker impl: the protocol can be discovered by its `DEBUG_NAME`.
impl fidl::endpoints::DiscoverableProtocolMarker for AllocatorMarker {}
/// Result type of `Allocator.GetVmoInfo`: response table on success, the
/// protocol's `Error` on failure.
pub type AllocatorGetVmoInfoResult = Result<AllocatorGetVmoInfoResponse, Error>;
333
/// Client-side abstraction over the `Allocator` protocol's methods,
/// implemented by [`AllocatorProxy`]. One-way methods return immediately with
/// a send result; two-way methods return an associated future of the response.
pub trait AllocatorProxyInterface: Send + Sync {
    /// One-way `AllocateNonSharedCollection`.
    fn r#allocate_non_shared_collection(
        &self,
        payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    /// One-way `AllocateSharedCollection`.
    fn r#allocate_shared_collection(
        &self,
        payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    /// One-way `BindSharedCollection`.
    fn r#bind_shared_collection(
        &self,
        payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    /// Future returned by [`Self::r#validate_buffer_collection_token`].
    type ValidateBufferCollectionTokenResponseFut: std::future::Future<
        Output = Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error>,
    > + Send;
    /// Two-way `ValidateBufferCollectionToken`.
    fn r#validate_buffer_collection_token(
        &self,
        payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut;
    /// One-way `SetDebugClientInfo`.
    fn r#set_debug_client_info(
        &self,
        payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    /// Future returned by [`Self::r#get_vmo_info`].
    type GetVmoInfoResponseFut: std::future::Future<Output = Result<AllocatorGetVmoInfoResult, fidl::Error>>
        + Send;
    /// Two-way `GetVmoInfo`.
    fn r#get_vmo_info(&self, payload: AllocatorGetVmoInfoRequest) -> Self::GetVmoInfoResponseFut;
}
/// Synchronous (blocking) client for `fuchsia.sysmem2.Allocator`.
/// Only compiled when targeting Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct AllocatorSynchronousProxy {
    // Underlying synchronous FIDL client over the protocol channel.
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for AllocatorSynchronousProxy {
    type Proxy = AllocatorProxy;
    type Protocol = AllocatorMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
385
// NOTE(review): machine-generated (fidlgen). The hex method ordinals and
// `DynamicFlags::FLEXIBLE` values below are part of the wire protocol and must
// match the generator's output exactly — do not hand-edit them.
#[cfg(target_os = "fuchsia")]
impl AllocatorSynchronousProxy {
    /// Creates a synchronous proxy over `channel`, tagged with the protocol's
    /// debug name for diagnostics.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorEvent, fidl::Error> {
        AllocatorEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes. This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel. This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorValidateBufferCollectionTokenRequest,
            fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
        Ok(_response)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
        let _response = self.client.send_query::<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
        >(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("get_vmo_info")?;
        Ok(_response.map(|x| x))
    }
}
671
// Converts the proxy into its raw channel handle.
#[cfg(target_os = "fuchsia")]
impl From<AllocatorSynchronousProxy> for zx::Handle {
    fn from(value: AllocatorSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

// Wraps a raw channel in a synchronous proxy.
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for AllocatorSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

// Builds a synchronous proxy from a typed client end of the protocol.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for AllocatorSynchronousProxy {
    type Protocol = AllocatorMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<AllocatorMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
694
/// Asynchronous client proxy for `fuchsia.sysmem2.Allocator`.
#[derive(Debug, Clone)]
pub struct AllocatorProxy {
    // Underlying async FIDL client over the protocol channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for AllocatorProxy {
    type Protocol = AllocatorMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    /// Attempts to recover the underlying channel; on failure returns the
    /// proxy back to the caller (presumably while other clones of the client
    /// still exist — confirm against the `fidl` crate docs).
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    /// Borrows the underlying channel.
    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
715
impl AllocatorProxy {
    /// Create a new Proxy for fuchsia.sysmem2/Allocator.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        // The protocol debug name is attached to the client so that
        // encode/decode errors identify which protocol produced them.
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> AllocatorEventStream {
        AllocatorEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes. This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This is a one-way method; the returned `Result` reflects only whether
    /// the request could be encoded and written to the channel.
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#allocate_non_shared_collection(self, payload)
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2/BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    ///
    /// This is a one-way method; the returned `Result` reflects only whether
    /// the request could be encoded and written to the channel.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#allocate_shared_collection(self, payload)
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    ///
    /// This is a one-way method; the returned `Result` reflects only whether
    /// the request could be encoded and written to the channel.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#bind_shared_collection(self, payload)
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel. This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    ///
    /// Two-way method: the returned future resolves when the server replies.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        AllocatorProxyInterface::r#validate_buffer_collection_token(self, payload)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    ///
    /// This is a one-way method; the returned `Result` reflects only whether
    /// the request could be encoded and written to the channel.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#set_debug_client_info(self, payload)
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    ///
    /// Two-way method: the returned future resolves when the server replies.
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        AllocatorProxyInterface::r#get_vmo_info(self, payload)
    }
}
965
impl AllocatorProxyInterface for AllocatorProxy {
    // One-way: encode the request and write it to the channel. The hex
    // literal is the FIDL method ordinal; FLEXIBLE marks the interaction as
    // flexible (open protocol).
    fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way send; see allocate_non_shared_collection for the pattern.
    fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way send; the payload is taken by value because it carries
    // channel endpoints (resource type).
    fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type ValidateBufferCollectionTokenResponseFut = fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way: send the query and decode the reply with the local `_decode`
    // helper. The ordinal in `_decode` must match the one passed to
    // send_query_and_decode.
    fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x4c5ee91b02a7e68d,
            >(_buf?)?
            .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            AllocatorValidateBufferCollectionTokenRequest,
            AllocatorValidateBufferCollectionTokenResponse,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way send of a value-type table (passed by shared reference).
    fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetVmoInfoResponseFut = fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way with an error union: FlexibleResultType decodes the reply into
    // Result<Response, Error>.
    fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> Self::GetVmoInfoResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x21a881120aa0ddf9,
            >(_buf?)?
            .into_result::<AllocatorMarker>("get_vmo_info")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResult>(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
}
1068
/// Stream of events arriving from the server end of fuchsia.sysmem2/Allocator.
pub struct AllocatorEventStream {
    // Receiver handed out by the client; see AllocatorProxy::take_event_stream.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
1072
impl std::marker::Unpin for AllocatorEventStream {}
1074
impl futures::stream::FusedStream for AllocatorEventStream {
    fn is_terminated(&self) -> bool {
        // Delegate to the receiver: once it terminates, no more events arrive.
        self.event_receiver.is_terminated()
    }
}
1080
impl futures::Stream for AllocatorEventStream {
    type Item = Result<AllocatorEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // `?` on the polled item short-circuits transport errors into a
        // Ready(Some(Err)) item; `ready!` propagates Pending.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            // A raw message buffer arrived; decode it into a typed event.
            Some(buf) => std::task::Poll::Ready(Some(AllocatorEvent::decode(buf))),
            // Receiver exhausted: the event stream is finished.
            None => std::task::Poll::Ready(None),
        }
    }
}
1097
/// Events delivered by the fuchsia.sysmem2/Allocator protocol. This protocol
/// declares no events of its own, so only unknown flexible events can appear.
#[derive(Debug)]
pub enum AllocatorEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
1106
impl AllocatorEvent {
    /// Decodes a message buffer as a [`AllocatorEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<AllocatorEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited messages and must carry transaction id 0.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Unrecognized but FLEXIBLE-flagged events (e.g. from a newer
            // peer) are surfaced as _UnknownEvent rather than failing.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(AllocatorEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            // A strict event with an unknown ordinal is a protocol error.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
1126
/// A Stream of incoming requests for fuchsia.sysmem2/Allocator.
pub struct AllocatorRequestStream {
    // Shared server state (channel + shutdown signal); also cloned into
    // control handles and responders.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded None; polling after that panics.
    is_terminated: bool,
}
1132
impl std::marker::Unpin for AllocatorRequestStream {}
1134
impl futures::stream::FusedStream for AllocatorRequestStream {
    fn is_terminated(&self) -> bool {
        // True after the stream has completed (shutdown or peer closed).
        self.is_terminated
    }
}
1140
impl fidl::endpoints::RequestStream for AllocatorRequestStream {
    type Protocol = AllocatorMarker;
    type ControlHandle = AllocatorControlHandle;

    // Wrap a raw channel in the shared serving state; a fresh stream is not
    // yet terminated.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Control handles share `inner` with the stream via Arc.
    fn control_handle(&self) -> Self::ControlHandle {
        AllocatorControlHandle { inner: self.inner.clone() }
    }

    // Decompose into (shared state, terminated flag) for re-wrapping.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
1167
impl futures::Stream for AllocatorRequestStream {
    type Item = Result<AllocatorRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A requested shutdown ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled AllocatorRequestStream after completion");
        }
        // Read and decode using thread-local buffers to avoid allocating per
        // message.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream without error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    // Any other read error is surfaced as a stream item.
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal in the transaction header.
                // One-way methods must carry tx_id 0; two-way methods carry a
                // nonzero tx_id, captured in the responder.
                std::task::Poll::Ready(Some(match header.ordinal {
                    0x5ca681f025a80e44 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateNonSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateNonSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateNonSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    0x11a19ff51f0b49c1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    0x550916b0dc1d5b4e => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorBindSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorBindSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::BindSharedCollection { payload: req, control_handle })
                    }
                    0x4c5ee91b02a7e68d => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorValidateBufferCollectionTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorValidateBufferCollectionTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::ValidateBufferCollectionToken {
                            payload: req,
                            // ManuallyDrop: the responder owns shutdown-on-drop
                            // semantics for the control handle.
                            responder: AllocatorValidateBufferCollectionTokenResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x6f68f19a3f509c4d => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    0x21a881120aa0ddf9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorGetVmoInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorGetVmoInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::GetVmoInfo {
                            payload: req,
                            responder: AllocatorGetVmoInfoResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method: surface it to the
                    // server implementation; nothing to reply to.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error so the caller's future completes, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
1319
1320/// Allocates system memory buffers.
1321///
1322/// Epitaphs are not used in this protocol.
1323#[derive(Debug)]
1324pub enum AllocatorRequest {
1325 /// Allocates a buffer collection on behalf of a single client (aka
1326 /// initiator) who is also the only participant (from the point of view of
1327 /// sysmem).
1328 ///
1329 /// This call exists mainly for temp/testing purposes. This call skips the
1330 /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
1331 /// allow another participant to specify its constraints.
1332 ///
1333 /// Real clients are encouraged to use
1334 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
1335 /// let relevant participants directly convey their own constraints to
1336 /// sysmem by sending `BufferCollectionToken`s to those participants.
1337 ///
1338 /// + request `collection_request` The server end of the
1339 /// [`fuchsia.sysmem2/BufferCollection`].
1340 AllocateNonSharedCollection {
1341 payload: AllocatorAllocateNonSharedCollectionRequest,
1342 control_handle: AllocatorControlHandle,
1343 },
1344 /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
1345 ///
1346 /// The `BufferCollectionToken` can be "duplicated" for distribution to
1347 /// participants by using
1348 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
1349 /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2/BufferCollection`] using
1351 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
1352 ///
1353 /// Buffer constraints can be set via
1354 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1355 ///
1356 /// Success/failure to populate the buffer collection with buffers can be
1357 /// determined from
1358 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
1359 ///
1360 /// Closing the client end of a `BufferCollectionToken` or
1361 /// `BufferCollection` (without `Release` first) will fail all client ends
1362 /// in the same failure domain, which by default is all client ends of the
1363 /// buffer collection. See
1364 /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
1365 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
1366 /// separate failure domains within a buffer collection.
1367 AllocateSharedCollection {
1368 payload: AllocatorAllocateSharedCollectionRequest,
1369 control_handle: AllocatorControlHandle,
1370 },
1371 /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
1372 /// [`fuchsia.sysmem2/BufferCollection`].
1373 ///
1374 /// At the time of sending this message, the buffer collection hasn't yet
1375 /// been populated with buffers - the participant must first also send
1376 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
1377 /// `BufferCollection` client end.
1378 ///
1379 /// All `BufferCollectionToken`(s) duplicated from a root
1380 /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
1381 /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
1382 /// existing `BufferCollection` client ends must have sent `SetConstraints`
1383 /// before the logical BufferCollection will be populated with buffers (or
1384 /// will fail if the overall set of constraints can't be satisfied).
1385 ///
1386 /// + request `token` The client endpoint of a channel whose server end was
1387 /// sent to sysmem using
1388 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
1389 /// end was sent to sysmem using
1390 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. The token is
1391 /// being "turned in" in exchange for a
1392 /// [`fuchsia.sysmem2/BufferCollection`].
1393 /// + request `buffer_collection_request` The server end of a
1394 /// [`fuchsia.sysmem2/BufferCollection`] channel. The sender retains the
1395 /// client end. The `BufferCollection` channel is a single participant's
1396 /// connection to the logical buffer collection. Typically there will be
1397 /// other participants with their own `BufferCollection` channel to the
1398 /// logical buffer collection.
1399 BindSharedCollection {
1400 payload: AllocatorBindSharedCollectionRequest,
1401 control_handle: AllocatorControlHandle,
1402 },
1403 /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
1404 /// the sysmem server.
1405 ///
1406 /// With this call, the client can determine whether an incoming token is a
1407 /// real sysmem token that is known to the sysmem server, without any risk
1408 /// of getting stuck waiting forever on a potentially fake token to complete
1409 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
1410 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
1411 /// FIDL message). In cases where the client trusts the source of the token
1412 /// to provide a real token, this call is not typically needed outside of
1413 /// debugging.
1414 ///
1415 /// If the validate fails sometimes but succeeds other times, the source of
1416 /// the token may itself not be calling
1417 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
1418 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
1419 /// token but before sending the token to the current client. It may be more
1420 /// convenient for the source to use
1421 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
1422 /// token(s), since that call has the sync step built in. Or, the buffer
1423 /// collection may be failing before this call is processed by the sysmem
1424 /// server, as buffer collection failure cleans up sysmem's tracking of
1425 /// associated tokens.
1426 ///
1427 /// This call has no effect on any token.
1428 ///
1429 /// + request `token_server_koid` The koid of the server end of a channel
1430 /// that might be a BufferCollectionToken channel. This can be obtained
1431 /// via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
1432 /// - response `is_known` true means sysmem knew of the token at the time
1433 /// sysmem processed the request, but doesn't guarantee that the token is
1434 /// still valid by the time the client receives the reply. What it does
1435 /// guarantee is that the token at least was a real token, so a two-way
1436 /// call to the token won't stall forever (will fail or succeed fairly
1437 /// quickly, not stall). This can already be known implicitly if the
1438 /// source of the token can be trusted to provide a real token. A false
1439 /// value means the token wasn't known to sysmem at the time sysmem
1440 /// processed this call, but the token may have previously been valid, or
1441 /// may yet become valid. Or if the sender of the token isn't trusted to
1442 /// provide a real token, the token may be fake. It's the responsibility
1443 /// of the sender to sync with sysmem to ensure that previously
1444 /// created/duplicated token(s) are known to sysmem, before sending the
1445 /// token(s) to other participants.
1446 ValidateBufferCollectionToken {
1447 payload: AllocatorValidateBufferCollectionTokenRequest,
1448 responder: AllocatorValidateBufferCollectionTokenResponder,
1449 },
1450 /// Set information about the current client that can be used by sysmem to
1451 /// help diagnose leaking memory and allocation stalls waiting for a
1452 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
1453 ///
1454 /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
1456 /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
1457 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
1458 /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
1459 /// these `BufferCollection`(s) have the same initial debug client info as
1460 /// the token turned in to create the `BufferCollection`).
1461 ///
1462 /// This info can be subsequently overridden on a per-`Node` basis by
1463 /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
1464 ///
1465 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
1466 /// `Allocator` is the most efficient way to ensure that all
1467 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
1468 /// set, and is also more efficient than separately sending the same debug
1469 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
1470 /// created [`fuchsia.sysmem2/Node`].
1471 ///
1472 /// + request `name` This can be an arbitrary string, but the current
1473 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
1474 /// + request `id` This can be an arbitrary id, but the current process ID
1475 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
1476 SetDebugClientInfo {
1477 payload: AllocatorSetDebugClientInfoRequest,
1478 control_handle: AllocatorControlHandle,
1479 },
1480 /// Given a handle to a sysmem-provided VMO, this returns additional info
1481 /// about the corresponding sysmem logical buffer.
1482 ///
1483 /// Most callers will duplicate a VMO handle first and send the duplicate to
1484 /// this call.
1485 ///
1486 /// If the client has created a child VMO of a sysmem-provided VMO, that
1487 /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
1488 ///
1489 /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
1490 /// - response `buffer_collection_id` The buffer collection ID, which is
1491 /// unique per logical buffer collection per boot.
1492 /// - response `buffer_index` The buffer index of the buffer within the
1493 /// buffer collection. This is the same as the index of the buffer within
1494 /// [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
1495 /// is the same for all sysmem-delivered VMOs corresponding to the same
1496 /// logical buffer, even if the VMO koids differ. The `buffer_index` is
1497 /// only unique across buffers of a buffer collection. For a given buffer,
1498 /// the combination of `buffer_collection_id` and `buffer_index` is unique
1499 /// per boot.
1500 /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
1501 /// the `close_weak_asap` field will be set in the response. This handle
1502 /// will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
1503 /// the buffer should be closed as soon as possible. This is signalled
1504 /// shortly after all strong sysmem VMOs to the buffer are closed
1505 /// (including any held indirectly via strong `BufferCollectionToken` or
1506 /// strong `BufferCollection`). Failure to close all weak sysmem VMO
1507 /// handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
1508 /// considered a VMO leak caused by the client still holding a weak sysmem
1509 /// VMO handle and results in loud complaints to the log by sysmem. The
1510 /// buffers of a collection can be freed independently of each other. The
1511 /// `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
1512 /// response arrives at the client. A client that isn't prepared to handle
1513 /// weak sysmem VMOs, on seeing this field set, can close all handles to
1514 /// the buffer and fail any associated request.
1515 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
1516 /// VMO. Both strong and weak sysmem VMOs can be passed to this call, and
1517 /// the VMO handle passed in to this call itself keeps the VMO's info
1518 /// alive for purposes of responding to this call. Because of this,
1519 /// ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
1520 /// handles to the VMO when calling; even if other handles are closed
1521 /// before the GetVmoInfo response arrives at the client).
1522 /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
1523 /// capable of being used with GetVmoInfo due to rights/capability
1524 /// attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
1525 /// topic [`ZX_INFO_HANDLE_BASIC`].
1526 /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
1527 /// unspecified reason. See the log for more info.
1528 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
1529 /// wasn't set, or there was some other problem with the request field(s).
1530 GetVmoInfo { payload: AllocatorGetVmoInfoRequest, responder: AllocatorGetVmoInfoResponder },
1531 /// An interaction was received which does not match any known method.
1532 #[non_exhaustive]
1533 _UnknownMethod {
1534 /// Ordinal of the method that was called.
1535 ordinal: u64,
1536 control_handle: AllocatorControlHandle,
1537 method_type: fidl::MethodType,
1538 },
1539}
1540
1541impl AllocatorRequest {
1542 #[allow(irrefutable_let_patterns)]
1543 pub fn into_allocate_non_shared_collection(
1544 self,
1545 ) -> Option<(AllocatorAllocateNonSharedCollectionRequest, AllocatorControlHandle)> {
1546 if let AllocatorRequest::AllocateNonSharedCollection { payload, control_handle } = self {
1547 Some((payload, control_handle))
1548 } else {
1549 None
1550 }
1551 }
1552
1553 #[allow(irrefutable_let_patterns)]
1554 pub fn into_allocate_shared_collection(
1555 self,
1556 ) -> Option<(AllocatorAllocateSharedCollectionRequest, AllocatorControlHandle)> {
1557 if let AllocatorRequest::AllocateSharedCollection { payload, control_handle } = self {
1558 Some((payload, control_handle))
1559 } else {
1560 None
1561 }
1562 }
1563
1564 #[allow(irrefutable_let_patterns)]
1565 pub fn into_bind_shared_collection(
1566 self,
1567 ) -> Option<(AllocatorBindSharedCollectionRequest, AllocatorControlHandle)> {
1568 if let AllocatorRequest::BindSharedCollection { payload, control_handle } = self {
1569 Some((payload, control_handle))
1570 } else {
1571 None
1572 }
1573 }
1574
1575 #[allow(irrefutable_let_patterns)]
1576 pub fn into_validate_buffer_collection_token(
1577 self,
1578 ) -> Option<(
1579 AllocatorValidateBufferCollectionTokenRequest,
1580 AllocatorValidateBufferCollectionTokenResponder,
1581 )> {
1582 if let AllocatorRequest::ValidateBufferCollectionToken { payload, responder } = self {
1583 Some((payload, responder))
1584 } else {
1585 None
1586 }
1587 }
1588
1589 #[allow(irrefutable_let_patterns)]
1590 pub fn into_set_debug_client_info(
1591 self,
1592 ) -> Option<(AllocatorSetDebugClientInfoRequest, AllocatorControlHandle)> {
1593 if let AllocatorRequest::SetDebugClientInfo { payload, control_handle } = self {
1594 Some((payload, control_handle))
1595 } else {
1596 None
1597 }
1598 }
1599
1600 #[allow(irrefutable_let_patterns)]
1601 pub fn into_get_vmo_info(
1602 self,
1603 ) -> Option<(AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResponder)> {
1604 if let AllocatorRequest::GetVmoInfo { payload, responder } = self {
1605 Some((payload, responder))
1606 } else {
1607 None
1608 }
1609 }
1610
1611 /// Name of the method defined in FIDL
1612 pub fn method_name(&self) -> &'static str {
1613 match *self {
1614 AllocatorRequest::AllocateNonSharedCollection { .. } => {
1615 "allocate_non_shared_collection"
1616 }
1617 AllocatorRequest::AllocateSharedCollection { .. } => "allocate_shared_collection",
1618 AllocatorRequest::BindSharedCollection { .. } => "bind_shared_collection",
1619 AllocatorRequest::ValidateBufferCollectionToken { .. } => {
1620 "validate_buffer_collection_token"
1621 }
1622 AllocatorRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
1623 AllocatorRequest::GetVmoInfo { .. } => "get_vmo_info",
1624 AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
1625 "unknown one-way method"
1626 }
1627 AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
1628 "unknown two-way method"
1629 }
1630 }
1631 }
1632}
1633
/// Control handle for the `Allocator` protocol: a cloneable handle to the
/// server endpoint, used to shut down or signal the channel independently of
/// any particular in-flight request.
#[derive(Debug, Clone)]
pub struct AllocatorControlHandle {
    // Shared server-side machinery; the responders for this protocol hold the
    // same `Arc` so any of them can shut down the channel on error.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
1638
impl fidl::endpoints::ControlHandle for AllocatorControlHandle {
    /// Closes the channel without sending an epitaph.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    /// Closes the channel, forwarding `status` to the inner server so it can
    /// be delivered to the client as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has already been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a signals handle that resolves when the channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Clears `clear_mask` and sets `set_mask` on the peer end of the
    /// underlying Zircon channel; only available on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
1664
// fidlgen emits an inherent impl here for protocol event senders; it is empty
// because this protocol has no events to send from the server.
impl AllocatorControlHandle {}
1666
/// Responder for the two-way `Allocator.ValidateBufferCollectionToken` method.
/// Exactly one response must be sent; dropping the responder without replying
/// shuts down the channel (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorValidateBufferCollectionTokenResponder {
    // `ManuallyDrop` lets `send`/`drop_without_shutdown` release the handle
    // exactly once without the field's destructor running a second time.
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
1673
/// Set the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorValidateBufferCollectionTokenResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1684
impl fidl::endpoints::Responder for AllocatorValidateBufferCollectionTokenResponder {
    type ControlHandle = AllocatorControlHandle;

    /// Borrows the control handle for this responder's channel.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel, for when the
    /// server intentionally declines to reply to this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1699
1700impl AllocatorValidateBufferCollectionTokenResponder {
1701 /// Sends a response to the FIDL transaction.
1702 ///
1703 /// Sets the channel to shutdown if an error occurs.
1704 pub fn send(
1705 self,
1706 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1707 ) -> Result<(), fidl::Error> {
1708 let _result = self.send_raw(payload);
1709 if _result.is_err() {
1710 self.control_handle.shutdown();
1711 }
1712 self.drop_without_shutdown();
1713 _result
1714 }
1715
1716 /// Similar to "send" but does not shutdown the channel if an error occurs.
1717 pub fn send_no_shutdown_on_err(
1718 self,
1719 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1720 ) -> Result<(), fidl::Error> {
1721 let _result = self.send_raw(payload);
1722 self.drop_without_shutdown();
1723 _result
1724 }
1725
1726 fn send_raw(
1727 &self,
1728 mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1729 ) -> Result<(), fidl::Error> {
1730 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
1731 AllocatorValidateBufferCollectionTokenResponse,
1732 >>(
1733 fidl::encoding::Flexible::new(payload),
1734 self.tx_id,
1735 0x4c5ee91b02a7e68d,
1736 fidl::encoding::DynamicFlags::FLEXIBLE,
1737 )
1738 }
1739}
1740
/// Responder for the two-way `Allocator.GetVmoInfo` method. Exactly one
/// response must be sent; dropping the responder without replying shuts down
/// the channel (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorGetVmoInfoResponder {
    // `ManuallyDrop` lets `send`/`drop_without_shutdown` release the handle
    // exactly once without the field's destructor running a second time.
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
1747
/// Set the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorGetVmoInfoResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1758
impl fidl::endpoints::Responder for AllocatorGetVmoInfoResponder {
    type ControlHandle = AllocatorControlHandle;

    /// Borrows the control handle for this responder's channel.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel, for when the
    /// server intentionally declines to reply to this transaction.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1773
1774impl AllocatorGetVmoInfoResponder {
1775 /// Sends a response to the FIDL transaction.
1776 ///
1777 /// Sets the channel to shutdown if an error occurs.
1778 pub fn send(
1779 self,
1780 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1781 ) -> Result<(), fidl::Error> {
1782 let _result = self.send_raw(result);
1783 if _result.is_err() {
1784 self.control_handle.shutdown();
1785 }
1786 self.drop_without_shutdown();
1787 _result
1788 }
1789
1790 /// Similar to "send" but does not shutdown the channel if an error occurs.
1791 pub fn send_no_shutdown_on_err(
1792 self,
1793 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1794 ) -> Result<(), fidl::Error> {
1795 let _result = self.send_raw(result);
1796 self.drop_without_shutdown();
1797 _result
1798 }
1799
1800 fn send_raw(
1801 &self,
1802 mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1803 ) -> Result<(), fidl::Error> {
1804 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
1805 AllocatorGetVmoInfoResponse,
1806 Error,
1807 >>(
1808 fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
1809 self.tx_id,
1810 0x21a881120aa0ddf9,
1811 fidl::encoding::DynamicFlags::FLEXIBLE,
1812 )
1813 }
1814}
1815
/// Marker type for the `fuchsia.sysmem2/BufferCollection` protocol; the
/// `ProtocolMarker` impl below associates it with the protocol's proxy,
/// request-stream, and synchronous-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionMarker;
1818
impl fidl::endpoints::ProtocolMarker for BufferCollectionMarker {
    type Proxy = BufferCollectionProxy;
    type RequestStream = BufferCollectionRequestStream;
    // The synchronous proxy only exists where a real Zircon channel does.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionSynchronousProxy;

    // Human-readable protocol name used in debug and error messages.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollection";
}
/// Result payload of `BufferCollection.WaitForAllBuffersAllocated`.
pub type BufferCollectionWaitForAllBuffersAllocatedResult =
    Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>;
/// Result payload of `BufferCollection.CheckAllBuffersAllocated`.
pub type BufferCollectionCheckAllBuffersAllocatedResult = Result<(), Error>;
1830
/// Client-side interface for the `BufferCollection` protocol, implemented by
/// the generated proxies. One-way methods return a `Result` synchronously;
/// two-way methods return a future (the `*ResponseFut` associated types) that
/// resolves to the decoded response.
pub trait BufferCollectionProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_constraints(
        &self,
        payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error>;
    type WaitForAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut;
    type CheckAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut;
    fn r#attach_token(
        &self,
        payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error>;
    fn r#attach_lifetime_tracking(
        &self,
        payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `BufferCollection` protocol.
/// Only available on Fuchsia targets, where a real Zircon channel exists.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionSynchronousProxy {
    // Blocking FIDL client that owns the channel.
    client: fidl::client::sync::Client,
}
1889
1890#[cfg(target_os = "fuchsia")]
1891impl fidl::endpoints::SynchronousProxy for BufferCollectionSynchronousProxy {
1892 type Proxy = BufferCollectionProxy;
1893 type Protocol = BufferCollectionMarker;
1894
1895 fn from_channel(inner: fidl::Channel) -> Self {
1896 Self::new(inner)
1897 }
1898
1899 fn into_channel(self) -> fidl::Channel {
1900 self.client.into_channel()
1901 }
1902
1903 fn as_channel(&self) -> &fidl::Channel {
1904 self.client.as_channel()
1905 }
1906}
1907
1908#[cfg(target_os = "fuchsia")]
1909impl BufferCollectionSynchronousProxy {
1910 pub fn new(channel: fidl::Channel) -> Self {
1911 let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
1912 Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
1913 }
1914
    /// Consumes the proxy and returns the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
1918
    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionEvent, fidl::Error> {
        // Blocks until `deadline`; the raw message is then decoded into this
        // protocol's event enum.
        BufferCollectionEvent::decode(self.client.wait_for_event(deadline)?)
    }
1927
    /// Ensure that previous messages have been received server side. This is
    /// particularly useful after previous messages that created new tokens,
    /// because a token must be known to the sysmem server before sending the
    /// token to another participant.
    ///
    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
    /// to mitigate the possibility of a hostile/fake
    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
    /// the token as part of exchanging it for a
    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
    /// of stalling.
    ///
    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
    /// and then starting and completing a `Sync`, it's then safe to send the
    /// `BufferCollectionToken` client ends to other participants knowing the
    /// server will recognize the tokens when they're sent by the other
    /// participants to sysmem in a
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
    /// efficient way to create tokens while avoiding unnecessary round trips.
    ///
    /// Other options include waiting for each
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
    /// individually (using separate call to `Sync` after each), or calling
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
    /// converted to a `BufferCollection` via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Flexible two-way call with empty request and response payloads; the
        // hex literal is this method's FIDL ordinal.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
        >(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("sync")?;
        Ok(_response)
    }
1974
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
    ///
    /// Normally a participant will convert a `BufferCollectionToken` into a
    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
    /// `Release` via the token (and then close the channel immediately or
    /// shortly later in response to server closing the server end), which
    /// avoids causing buffer collection failure. Without a prior `Release`,
    /// closing the `BufferCollectionToken` client end will cause buffer
    /// collection failure.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
    ///
    /// By default the server handles unexpected closure of a
    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
    /// first) by failing the buffer collection. Partly this is to expedite
    /// closing VMO handles to reclaim memory when any participant fails. If a
    /// participant would like to cleanly close a `BufferCollection` without
    /// causing buffer collection failure, the participant can send `Release`
    /// before closing the `BufferCollection` client end. The `Release` can
    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
    /// buffer collection won't require constraints from this node in order to
    /// allocate. If after `SetConstraints`, the constraints are retained and
    /// aggregated, despite the lack of `BufferCollection` connection at the
    /// time of constraints aggregation.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
    ///
    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
    /// end (without `Release` first) will trigger failure of the buffer
    /// collection. To close a `BufferCollectionTokenGroup` channel without
    /// failing the buffer collection, ensure that AllChildrenPresent() has been
    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
    /// client end.
    ///
    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
    /// buffer collection will fail (triggered by reception of `Release` without
    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
    /// close requires `AllChildrenPresent` (if not already sent), then
    /// `Release`, then close client end.
    ///
    /// If `Release` occurs after `AllChildrenPresent`, the children and all
    /// their constraints remain intact (just as they would if the
    /// `BufferCollectionTokenGroup` channel had remained open), and the client
    /// end close doesn't trigger buffer collection failure.
    ///
    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
    ///
    /// For brevity, the per-channel-protocol paragraphs above ignore the
    /// separate failure domain created by
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
    /// unexpectedly closes (without `Release` first) and that client end is
    /// under a failure domain, instead of failing the whole buffer collection,
    /// the failure domain is failed, but the buffer collection itself is
    /// isolated from failure of the failure domain. Such failure domains can be
    /// nested, in which case only the inner-most failure domain in which the
    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) flexible call; no response is expected.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2043
    /// Set a name for VMOs in this buffer collection.
    ///
    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
    /// will be truncated to fit. The name of the vmo will be suffixed with the
    /// buffer index within the collection (if the suffix fits within
    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
    /// listed in the inspect data.
    ///
    /// The name only affects VMOs allocated after the name is set; this call
    /// does not rename existing VMOs. If multiple clients set different names
    /// then the larger priority value will win. Setting a new name with the
    /// same priority as a prior name doesn't change the name.
    ///
    /// All table fields are currently required.
    ///
    /// + request `priority` The name is only set if this is the first `SetName`
    ///   or if `priority` is greater than any previous `priority` value in
    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way flexible call; the hex literal is the method's FIDL ordinal.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2070
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overriden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way flexible call; the hex literal is the method's FIDL ordinal.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2110
    /// Sysmem logs a warning if sysmem hasn't seen
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
    /// within 5 seconds after creation of a new collection.
    ///
    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
    ///
    /// In most cases the default works well.
    ///
    /// All table fields are currently required.
    ///
    /// + request `deadline` The time at which sysmem will start trying to log
    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way flexible call; the hex literal is the method's FIDL ordinal.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2135
    /// This enables verbose logging for the buffer collection.
    ///
    /// Verbose logging includes constraints set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
    /// the tree of `Node`(s).
    ///
    /// Normally sysmem prints only a single line complaint when aggregation
    /// fails, with just the specific detailed reason that aggregation failed,
    /// with little surrounding context. While this is often enough to diagnose
    /// a problem if only a small change was made and everything was working
    /// before the small change, it's often not particularly helpful for getting
    /// a new buffer collection to work for the first time. Especially with
    /// more complex trees of nodes, involving things like
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
    /// looks like and why it's failing a logical allocation, or why a tree or
    /// subtree is failing sooner than expected.
    ///
    /// The intent of the extra logging is to be acceptable from a performance
    /// point of view, under the assumption that verbose logging is only enabled
    /// on a low number of buffer collections. If we're not tracking down a bug,
    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way flexible call with an empty payload; the hex literal is the
        // method's FIDL ordinal.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2169
    /// This gets a handle that can be used as a parameter to
    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
    /// client obtained this handle from this `Node`.
    ///
    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
    /// despite the two calls typically being on different channels.
    ///
    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// All table fields are currently required.
    ///
    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
    ///   different `Node` channel, to prove that the client obtained the handle
    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Flexible two-way call; blocks until the response or `___deadline`.
        // The hex literal is the method's FIDL ordinal.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionMarker>("get_node_ref")?;
        Ok(_response)
    }
2202
2203 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
2204 /// rooted at a different child token of a common parent
2205 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
2206 /// passed-in `node_ref`.
2207 ///
2208 /// This call is for assisting with admission control de-duplication, and
2209 /// with debugging.
2210 ///
2211 /// The `node_ref` must be obtained using
2212 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
2213 ///
2214 /// The `node_ref` can be a duplicated handle; it's not necessary to call
2215 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
2216 ///
2217 /// If a calling token may not actually be a valid token at all due to a
2218 /// potentially hostile/untrusted provider of the token, call
2219 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
2220 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
2221 /// never responds due to a calling token not being a real token (not really
2222 /// talking to sysmem). Another option is to call
2223 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
2224 /// which also validates the token along with converting it to a
2225 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
2226 ///
2227 /// All table fields are currently required.
2228 ///
2229 /// - response `is_alternate`
2230 /// - true: The first parent node in common between the calling node and
2231 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
2232 /// that the calling `Node` and the `node_ref` `Node` will not have both
2233 /// their constraints apply - rather sysmem will choose one or the other
2234 /// of the constraints - never both. This is because only one child of
2235 /// a `BufferCollectionTokenGroup` is selected during logical
2236 /// allocation, with only that one child's subtree contributing to
2237 /// constraints aggregation.
2238 /// - false: The first parent node in common between the calling `Node`
2239 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
2240 /// Currently, this means the first parent node in common is a
2241 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
2242 /// `Release`ed). This means that the calling `Node` and the `node_ref`
2243 /// `Node` may have both their constraints apply during constraints
2244 /// aggregation of the logical allocation, if both `Node`(s) are
2245 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
2246 /// this case, there is no `BufferCollectionTokenGroup` that will
2247 /// directly prevent the two `Node`(s) from both being selected and
2248 /// their constraints both aggregated, but even when false, one or both
2249 /// `Node`(s) may still be eliminated from consideration if one or both
2250 /// `Node`(s) has a direct or indirect parent
2251 /// `BufferCollectionTokenGroup` which selects a child subtree other
2252 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
2253 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
2254 /// associated with the same buffer collection as the calling `Node`.
2255 /// Another reason for this error is if the `node_ref` is an
2256 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
2257 /// a real `node_ref` obtained from `GetNodeRef`.
2258 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
2259 /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
2260 /// the needed rights expected on a real `node_ref`.
2261 /// * No other failing status codes are returned by this call. However,
2262 /// sysmem may add additional codes in future, so the client should have
2263 /// sensible default handling for any failing status code.
2264 pub fn r#is_alternate_for(
2265 &self,
2266 mut payload: NodeIsAlternateForRequest,
2267 ___deadline: zx::MonotonicInstant,
2268 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
2269 let _response = self.client.send_query::<
2270 NodeIsAlternateForRequest,
2271 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
2272 >(
2273 &mut payload,
2274 0x3a58e00157e0825,
2275 fidl::encoding::DynamicFlags::FLEXIBLE,
2276 ___deadline,
2277 )?
2278 .into_result::<BufferCollectionMarker>("is_alternate_for")?;
2279 Ok(_response.map(|x| x))
2280 }
2281
2282 /// Get the buffer collection ID. This ID is also available from
2283 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
2284 /// within the collection).
2285 ///
2286 /// This call is mainly useful in situations where we can't convey a
2287 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
2288 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
2289 /// handle, which can be joined back up with a `BufferCollection` client end
2290 /// that was created via a different path. Prefer to convey a
2291 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
2292 ///
2293 /// Trusting a `buffer_collection_id` value from a source other than sysmem
2294 /// is analogous to trusting a koid value from a source other than zircon.
2295 /// Both should be avoided unless really necessary, and both require
2296 /// caution. In some situations it may be reasonable to refer to a
2297 /// pre-established `BufferCollection` by `buffer_collection_id` via a
2298 /// protocol for efficiency reasons, but an incoming value purporting to be
2299 /// a `buffer_collection_id` is not sufficient alone to justify granting the
2300 /// sender of the `buffer_collection_id` any capability. The sender must
2301 /// first prove to a receiver that the sender has/had a VMO or has/had a
2302 /// `BufferCollectionToken` to the same collection by sending a handle that
2303 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
2304 /// `buffer_collection_id` value. The receiver should take care to avoid
2305 /// assuming that a sender had a `BufferCollectionToken` in cases where the
2306 /// sender has only proven that the sender had a VMO.
2307 ///
2308 /// - response `buffer_collection_id` This ID is unique per buffer
2309 /// collection per boot. Each buffer is uniquely identified by the
2310 /// `buffer_collection_id` and `buffer_index` together.
2311 pub fn r#get_buffer_collection_id(
2312 &self,
2313 ___deadline: zx::MonotonicInstant,
2314 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
2315 let _response = self.client.send_query::<
2316 fidl::encoding::EmptyPayload,
2317 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
2318 >(
2319 (),
2320 0x77d19a494b78ba8c,
2321 fidl::encoding::DynamicFlags::FLEXIBLE,
2322 ___deadline,
2323 )?
2324 .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
2325 Ok(_response)
2326 }
2327
2328 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
2329 /// created after this message to weak, which means that a client's `Node`
2330 /// client end (or a child created after this message) is not alone
2331 /// sufficient to keep allocated VMOs alive.
2332 ///
2333 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
2334 /// `close_weak_asap`.
2335 ///
2336 /// This message is only permitted before the `Node` becomes ready for
2337 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
2338 /// * `BufferCollectionToken`: any time
2339 /// * `BufferCollection`: before `SetConstraints`
2340 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
2341 ///
2342 /// Currently, no conversion from strong `Node` to weak `Node` after ready
2343 /// for allocation is provided, but a client can simulate that by creating
2344 /// an additional `Node` before allocation and setting that additional
2345 /// `Node` to weak, and then potentially at some point later sending
2346 /// `Release` and closing the client end of the client's strong `Node`, but
2347 /// keeping the client's weak `Node`.
2348 ///
2349 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
2350 /// collection failure (all `Node` client end(s) will see
2351 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
2352 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
2353 /// this situation until all `Node`(s) are ready for allocation. For initial
2354 /// allocation to succeed, at least one strong `Node` is required to exist
2355 /// at allocation time, but after that client receives VMO handles, that
2356 /// client can `BufferCollection.Release` and close the client end without
2357 /// causing this type of failure.
2358 ///
2359 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
2360 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
2361 /// separately as appropriate.
2362 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
2363 self.client.send::<fidl::encoding::EmptyPayload>(
2364 (),
2365 0x22dd3ea514eeffe1,
2366 fidl::encoding::DynamicFlags::FLEXIBLE,
2367 )
2368 }
2369
2370 /// This indicates to sysmem that the client is prepared to pay attention to
2371 /// `close_weak_asap`.
2372 ///
2373 /// If sent, this message must be before
2374 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
2375 ///
2376 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
2377 /// send this message before `WaitForAllBuffersAllocated`, or a parent
2378 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
2379 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
2380 /// trigger buffer collection failure.
2381 ///
2382 /// This message is necessary because weak sysmem VMOs have not always been
2383 /// a thing, so older clients are not aware of the need to pay attention to
2384 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
2385 /// sysmem weak VMO handles asap. By having this message and requiring
2386 /// participants to indicate their acceptance of this aspect of the overall
2387 /// protocol, we avoid situations where an older client is delivered a weak
2388 /// VMO without any way for sysmem to get that VMO to close quickly later
2389 /// (and on a per-buffer basis).
2390 ///
2391 /// A participant that doesn't handle `close_weak_asap` and also doesn't
2392 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
2393 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
2394 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
2395 /// same participant has a child/delegate which does retrieve VMOs, that
2396 /// child/delegate will need to send `SetWeakOk` before
2397 /// `WaitForAllBuffersAllocated`.
2398 ///
2399 /// + request `for_child_nodes_also` If present and true, this means direct
2400 /// child nodes of this node created after this message plus all
2401 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
2402 /// those nodes. Any child node of this node that was created before this
2403 /// message is not included. This setting is "sticky" in the sense that a
2404 /// subsequent `SetWeakOk` without this bool set to true does not reset
2405 /// the server-side bool. If this creates a problem for a participant, a
2406 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
2407 /// tokens instead, as appropriate. A participant should only set
2408 /// `for_child_nodes_also` true if the participant can really promise to
2409 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
2410 /// weak VMO handles held by participants holding the corresponding child
2411 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
2412 /// which are using sysmem(1) can be weak, despite the clients of those
2413 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
2414 /// direct way to find out about `close_weak_asap`. This only applies to
2415 /// descendents of this `Node` which are using sysmem(1), not to this
2416 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
2417 /// token, which will fail allocation unless an ancestor of this `Node`
2418 /// specified `for_child_nodes_also` true.
2419 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
2420 self.client.send::<NodeSetWeakOkRequest>(
2421 &mut payload,
2422 0x38a44fc4d7724be9,
2423 fidl::encoding::DynamicFlags::FLEXIBLE,
2424 )
2425 }
2426
2427 /// The server_end will be closed after this `Node` and any child nodes have
2428 /// have released their buffer counts, making those counts available for
2429 /// reservation by a different `Node` via
2430 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
2431 ///
2432 /// The `Node` buffer counts may not be released until the entire tree of
2433 /// `Node`(s) is closed or failed, because
2434 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
2435 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
2436 /// `Node` buffer counts remain reserved until the orphaned node is later
2437 /// cleaned up.
2438 ///
2439 /// If the `Node` exceeds a fairly large number of attached eventpair server
2440 /// ends, a log message will indicate this and the `Node` (and the
2441 /// appropriate) sub-tree will fail.
2442 ///
2443 /// The `server_end` will remain open when
2444 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
2445 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
2446 /// [`fuchsia.sysmem2/BufferCollection`].
2447 ///
2448 /// This message can also be used with a
2449 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
2450 pub fn r#attach_node_tracking(
2451 &self,
2452 mut payload: NodeAttachNodeTrackingRequest,
2453 ) -> Result<(), fidl::Error> {
2454 self.client.send::<NodeAttachNodeTrackingRequest>(
2455 &mut payload,
2456 0x3f22f2a293d3cdac,
2457 fidl::encoding::DynamicFlags::FLEXIBLE,
2458 )
2459 }
2460
2461 /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
2462 /// collection.
2463 ///
2464 /// A participant may only call
2465 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
2466 /// [`fuchsia.sysmem2/BufferCollection`].
2467 ///
2468 /// For buffer allocation to be attempted, all holders of a
2469 /// `BufferCollection` client end need to call `SetConstraints` before
2470 /// sysmem will attempt to allocate buffers.
2471 ///
2472 /// + request `constraints` These are the constraints on the buffer
2473 /// collection imposed by the sending client/participant. The
2474 /// `constraints` field is not required to be set. If not set, the client
2475 /// is not setting any actual constraints, but is indicating that the
2476 /// client has no constraints to set. A client that doesn't set the
2477 /// `constraints` field won't receive any VMO handles, but can still find
2478 /// out how many buffers were allocated and can still refer to buffers by
2479 /// their `buffer_index`.
2480 pub fn r#set_constraints(
2481 &self,
2482 mut payload: BufferCollectionSetConstraintsRequest,
2483 ) -> Result<(), fidl::Error> {
2484 self.client.send::<BufferCollectionSetConstraintsRequest>(
2485 &mut payload,
2486 0x1fde0f19d650197b,
2487 fidl::encoding::DynamicFlags::FLEXIBLE,
2488 )
2489 }
2490
2491 /// Wait until all buffers are allocated.
2492 ///
2493 /// This FIDL call completes when buffers have been allocated, or completes
2494 /// with some failure detail if allocation has been attempted but failed.
2495 ///
2496 /// The following must occur before buffers will be allocated:
2497 /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
2498 /// collection must be turned in via `BindSharedCollection` to get a
2499 /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
2500 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
2501 /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
2502 /// to them.
2503 /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
2504 /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
2505 /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
2506 /// sent to them.
2507 ///
2508 /// - result `buffer_collection_info` The VMO handles and other related
2509 /// info.
2510 /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
2511 /// cannot be fulfilled due to resource exhaustion.
2512 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
2513 /// malformed.
2514 /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
2515 /// request is valid but cannot be satisfied, perhaps due to hardware
2516 /// limitations. This can happen if participants have incompatible
2517 /// constraints (empty intersection, roughly speaking). See the log for
2518 /// more info. In cases where a participant could potentially be treated
2519 /// as optional, see [`BufferCollectionTokenGroup`]. When using
2520 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
2521 /// error code if there aren't enough buffers in the pre-existing
2522 /// collection to satisfy the constraints set on the attached token and
2523 /// any sub-tree of tokens derived from the attached token.
2524 pub fn r#wait_for_all_buffers_allocated(
2525 &self,
2526 ___deadline: zx::MonotonicInstant,
2527 ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
2528 let _response = self
2529 .client
2530 .send_query::<fidl::encoding::EmptyPayload, fidl::encoding::FlexibleResultType<
2531 BufferCollectionWaitForAllBuffersAllocatedResponse,
2532 Error,
2533 >>(
2534 (), 0x62300344b61404e, fidl::encoding::DynamicFlags::FLEXIBLE, ___deadline
2535 )?
2536 .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
2537 Ok(_response.map(|x| x))
2538 }
2539
2540 /// Checks whether all the buffers have been allocated, in a polling
2541 /// fashion.
2542 ///
2543 /// * If the buffer collection has been allocated, returns success.
2544 /// * If the buffer collection failed allocation, returns the same
2545 /// [`fuchsia.sysmem2/Error`] as
2546 /// [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
2547 /// return.
2548 /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
2549 /// attempted allocation yet. This means that WaitForAllBuffersAllocated
2550 /// would not respond quickly.
2551 pub fn r#check_all_buffers_allocated(
2552 &self,
2553 ___deadline: zx::MonotonicInstant,
2554 ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
2555 let _response = self.client.send_query::<
2556 fidl::encoding::EmptyPayload,
2557 fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
2558 >(
2559 (),
2560 0x35a5fe77ce939c10,
2561 fidl::encoding::DynamicFlags::FLEXIBLE,
2562 ___deadline,
2563 )?
2564 .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
2565 Ok(_response.map(|x| x))
2566 }
2567
2568 /// Create a new token to add a new participant to an existing logical
2569 /// buffer collection, if the existing collection's buffer counts,
2570 /// constraints, and participants allow.
2571 ///
2572 /// This can be useful in replacing a failed participant, and/or in
2573 /// adding/re-adding a participant after buffers have already been
2574 /// allocated.
2575 ///
2576 /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
2577 /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
2578 /// goes through the normal procedure of setting constraints or closing
2579 /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
2580 /// clients' point of view, despite the possibility that all the buffers
2581 /// were actually allocated previously. This process is called "logical
2582 /// allocation". Most instances of "allocation" in docs for other messages
2583 /// can also be read as "allocation or logical allocation" while remaining
2584 /// valid, but we just say "allocation" in most places for brevity/clarity
2585 /// of explanation, with the details of "logical allocation" left for the
2586 /// docs here on `AttachToken`.
2587 ///
2588 /// Failure of an attached `Node` does not propagate to the parent of the
2589 /// attached `Node`. More generally, failure of a child `Node` is blocked
2590 /// from reaching its parent `Node` if the child is attached, or if the
2591 /// child is dispensable and the failure occurred after logical allocation
2592 /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
2593 ///
2594 /// A participant may in some scenarios choose to initially use a
2595 /// dispensable token for a given instance of a delegate participant, and
2596 /// then later if the first instance of that delegate participant fails, a
2597 /// new second instance of that delegate participant my be given a token
2598 /// created with `AttachToken`.
2599 ///
2600 /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
2601 /// client end, the token acts like any other token. The client can
2602 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
2603 /// and can send the token to a different process/participant. The
2604 /// `BufferCollectionToken` `Node` should be converted to a
2605 /// `BufferCollection` `Node` as normal by sending
2606 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
2607 /// without causing subtree failure by sending
2608 /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
2609 /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
2610 /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
2611 /// the `BufferCollection`.
2612 ///
2613 /// Within the subtree, a success result from
2614 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
2615 /// the subtree participants' constraints were satisfiable using the
2616 /// already-existing buffer collection, the already-established
2617 /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
2618 /// constraints, and the already-existing other participants (already added
2619 /// via successful logical allocation) and their specified buffer counts in
2620 /// their constraints. A failure result means the new participants'
2621 /// constraints cannot be satisfied using the existing buffer collection and
2622 /// its already-added participants. Creating a new collection instead may
2623 /// allow all participants' constraints to be satisfied, assuming
2624 /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
2625 /// used.
2626 ///
2627 /// A token created with `AttachToken` performs constraints aggregation with
2628 /// all constraints currently in effect on the buffer collection, plus the
2629 /// attached token under consideration plus child tokens under the attached
2630 /// token which are not themselves an attached token or under such a token.
2631 /// Further subtrees under this subtree are considered for logical
2632 /// allocation only after this subtree has completed logical allocation.
2633 ///
2634 /// Assignment of existing buffers to participants'
2635 /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
2636 /// etc is first-come first-served, but a child can't logically allocate
2637 /// before all its parents have sent `SetConstraints`.
2638 ///
2639 /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
2640 /// in contrast to `AttachToken`, has the created token `Node` + child
2641 /// `Node`(s) (in the created subtree but not in any subtree under this
2642 /// subtree) participate in constraints aggregation along with its parent
2643 /// during the parent's allocation or logical allocation.
2644 ///
2645 /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
2646 /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
2647 /// sysmem before the new token can be passed to `BindSharedCollection`. The
2648 /// `Sync` of the new token can be accomplished with
2649 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
2650 /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
2651 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
2652 /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
2653 /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
2654 /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
2655 /// created token, to also sync those additional tokens to sysmem using a
2656 /// single round-trip.
2657 ///
2658 /// All table fields are currently required.
2659 ///
2660 /// + request `rights_attentuation_mask` This allows attenuating the VMO
2661 /// rights of the subtree. These values for `rights_attenuation_mask`
2662 /// result in no attenuation (note that 0 is not on this list):
2663 /// + ZX_RIGHT_SAME_RIGHTS (preferred)
2664 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
2665 /// + request `token_request` The server end of the `BufferCollectionToken`
2666 /// channel. The client retains the client end.
2667 pub fn r#attach_token(
2668 &self,
2669 mut payload: BufferCollectionAttachTokenRequest,
2670 ) -> Result<(), fidl::Error> {
2671 self.client.send::<BufferCollectionAttachTokenRequest>(
2672 &mut payload,
2673 0x46ac7d0008492982,
2674 fidl::encoding::DynamicFlags::FLEXIBLE,
2675 )
2676 }
2677
2678 /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
2679 /// buffers have been allocated and only the specified number of buffers (or
2680 /// fewer) remain in the buffer collection.
2681 ///
2682 /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
2683 /// client to wait until an old buffer collection is fully or mostly
2684 /// deallocated before attempting allocation of a new buffer collection. The
2685 /// eventpair is only signalled when the buffers of this collection have
2686 /// been fully deallocated (not just un-referenced by clients, but all the
2687 /// memory consumed by those buffers has been fully reclaimed/recycled), or
2688 /// when allocation or logical allocation fails for the tree or subtree
2689 /// including this [`fuchsia.sysmem2/BufferCollection`].
2690 ///
2691 /// The eventpair won't be signalled until allocation or logical allocation
2692 /// has completed; until then, the collection's current buffer count is
2693 /// ignored.
2694 ///
2695 /// If logical allocation fails for an attached subtree (using
2696 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
2697 /// eventpair will close during that failure regardless of the number of
2698 /// buffers potenitally allocated in the overall buffer collection. This is
2699 /// for logical allocation consistency with normal allocation.
2700 ///
2701 /// The lifetime signalled by this event includes asynchronous cleanup of
2702 /// allocated buffers, and this asynchronous cleanup cannot occur until all
2703 /// holders of VMO handles to the buffers have closed those VMO handles.
2704 /// Therefore, clients should take care not to become blocked forever
2705 /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
2706 /// participants using the logical buffer collection (including the waiter
2707 /// itself) are less trusted, less reliable, or potentially blocked by the
2708 /// wait itself. Waiting asynchronously is recommended. Setting a deadline
2709 /// for the client wait may be prudent, depending on details of how the
2710 /// collection and/or its VMOs are used or shared. Failure to allocate a
2711 /// new/replacement buffer collection is better than getting stuck forever.
2712 ///
2713 /// The sysmem server itself intentionally does not perform any waiting on
2714 /// already-failed collections' VMOs to finish cleaning up before attempting
2715 /// a new allocation, and the sysmem server intentionally doesn't retry
2716 /// allocation if a new allocation fails due to out of memory, even if that
2717 /// failure is potentially due to continued existence of an old collection's
2718 /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
2719 /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
2720 /// as long as the waiting client is careful to not create a deadlock.
2721 ///
2722 /// Continued existence of old collections that are still cleaning up is not
2723 /// the only reason that a new allocation may fail due to insufficient
2724 /// memory, even if the new allocation is allocating physically contiguous
2725 /// buffers. Overall system memory pressure can also be the cause of failure
2726 /// to allocate a new collection. See also
2727 /// [`fuchsia.memorypressure/Provider`].
2728 ///
2729 /// `AttachLifetimeTracking` is meant to be compatible with other protocols
2730 /// with a similar `AttachLifetimeTracking` message; duplicates of the same
2731 /// `eventpair` handle (server end) can be sent via more than one
2732 /// `AttachLifetimeTracking` message to different protocols, and the
2733 /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
2734 /// the conditions are met (all holders of duplicates have closed their
2735 /// server end handle(s)). Also, thanks to how eventpair endponts work, the
2736 /// client end can (also) be duplicated without preventing the
2737 /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
2738 ///
2739 /// The server intentionally doesn't "trust" any signals set on the
2740 /// `server_end`. This mechanism intentionally uses only
2741 /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
2742 /// "early", and is only set when all handles to the server end eventpair
2743 /// are closed. No meaning is associated with any of the other signals, and
2744 /// clients should ignore any other signal bits on either end of the
2745 /// `eventpair`.
2746 ///
2747 /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
2748 /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
2749 /// transfer without causing `BufferCollection` channel failure).
2750 ///
2751 /// All table fields are currently required.
2752 ///
2753 /// + request `server_end` This eventpair handle will be closed by the
2754 /// sysmem server when buffers have been allocated initially and the
2755 /// number of buffers is then less than or equal to `buffers_remaining`.
2756 /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
2757 /// fewer) buffers to be fully deallocated. A number greater than zero can
2758 /// be useful in situations where a known number of buffers are
2759 /// intentionally not closed so that the data can continue to be used,
2760 /// such as for keeping the last available video frame displayed in the UI
2761 /// even if the video stream was using protected output buffers. It's
2762 /// outside the scope of the `BufferCollection` interface (at least for
2763 /// now) to determine how many buffers may be held without closing, but
2764 /// it'll typically be in the range 0-2.
    pub fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way request: encodes `payload` and sends it with the generated
        // method ordinal for AttachLifetimeTracking; no response is awaited
        // (the synchronous `Result` reflects only the local send).
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
2775}
2776
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionSynchronousProxy> for zx::Handle {
    /// Consumes the synchronous proxy, recovering its channel and converting
    /// that channel into a generic Zircon handle.
    fn from(value: BufferCollectionSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
2783
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionSynchronousProxy {
    /// Wraps a raw channel in a synchronous `BufferCollection` proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionSynchronousProxy::new(value)
    }
}
2790
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionSynchronousProxy {
    type Protocol = BufferCollectionMarker;

    /// Builds a synchronous proxy from a typed client endpoint by unwrapping
    /// it to its underlying channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
2799
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollection`
/// protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionProxy {
    // Async FIDL client that encodes/sends requests and decodes responses on
    // the underlying channel; all proxy methods delegate to it.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
2804
2805impl fidl::endpoints::Proxy for BufferCollectionProxy {
2806 type Protocol = BufferCollectionMarker;
2807
2808 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
2809 Self::new(inner)
2810 }
2811
2812 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
2813 self.client.into_channel().map_err(|client| Self { client })
2814 }
2815
2816 fn as_channel(&self) -> &::fidl::AsyncChannel {
2817 self.client.as_channel()
2818 }
2819}
2820
2821impl BufferCollectionProxy {
2822 /// Create a new Proxy for fuchsia.sysmem2/BufferCollection.
2823 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
2824 let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
2825 Self { client: fidl::client::Client::new(channel, protocol_name) }
2826 }
2827
2828 /// Get a Stream of events from the remote end of the protocol.
2829 ///
2830 /// # Panics
2831 ///
2832 /// Panics if the event stream was already taken.
2833 pub fn take_event_stream(&self) -> BufferCollectionEventStream {
2834 BufferCollectionEventStream { event_receiver: self.client.take_event_receiver() }
2835 }
2836
    /// Ensure that previous messages have been received server side. This is
    /// particularly useful after previous messages that created new tokens,
    /// because a token must be known to the sysmem server before sending the
    /// token to another participant.
    ///
    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
    /// to mitigate the possibility of a hostile/fake
    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
    /// the token as part of exchanging it for a
    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
    /// of stalling.
    ///
    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
    /// and then starting and completing a `Sync`, it's then safe to send the
    /// `BufferCollectionToken` client ends to other participants knowing the
    /// server will recognize the tokens when they're sent by the other
    /// participants to sysmem in a
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
    /// efficient way to create tokens while avoiding unnecessary round trips.
    ///
    /// Other options include waiting for each
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
    /// individually (using separate call to `Sync` after each), or calling
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
    /// converted to a `BufferCollection` via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(
        &self,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Two-way call: delegates to the generated trait implementation, which
        // returns a future that resolves when the (empty) response arrives.
        BufferCollectionProxyInterface::r#sync(self)
    }
2875
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
    ///
    /// Normally a participant will convert a `BufferCollectionToken` into a
    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
    /// `Release` via the token (and then close the channel immediately or
    /// shortly later in response to server closing the server end), which
    /// avoids causing buffer collection failure. Without a prior `Release`,
    /// closing the `BufferCollectionToken` client end will cause buffer
    /// collection failure.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
    ///
    /// By default the server handles unexpected closure of a
    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
    /// first) by failing the buffer collection. Partly this is to expedite
    /// closing VMO handles to reclaim memory when any participant fails. If a
    /// participant would like to cleanly close a `BufferCollection` without
    /// causing buffer collection failure, the participant can send `Release`
    /// before closing the `BufferCollection` client end. The `Release` can
    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
    /// buffer collection won't require constraints from this node in order to
    /// allocate. If after `SetConstraints`, the constraints are retained and
    /// aggregated, despite the lack of `BufferCollection` connection at the
    /// time of constraints aggregation.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
    ///
    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
    /// end (without `Release` first) will trigger failure of the buffer
    /// collection. To close a `BufferCollectionTokenGroup` channel without
    /// failing the buffer collection, ensure that AllChildrenPresent() has been
    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
    /// client end.
    ///
    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
    /// buffer collection will fail (triggered by reception of `Release` without
    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
    /// close requires `AllChildrenPresent` (if not already sent), then
    /// `Release`, then close client end.
    ///
    /// If `Release` occurs after `AllChildrenPresent`, the children and all
    /// their constraints remain intact (just as they would if the
    /// `BufferCollectionTokenGroup` channel had remained open), and the client
    /// end close doesn't trigger buffer collection failure.
    ///
    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
    ///
    /// For brevity, the per-channel-protocol paragraphs above ignore the
    /// separate failure domain created by
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
    /// unexpectedly closes (without `Release` first) and that client end is
    /// under a failure domain, instead of failing the whole buffer collection,
    /// the failure domain is failed, but the buffer collection itself is
    /// isolated from failure of the failure domain. Such failure domains can be
    /// nested, in which case only the inner-most failure domain in which the
    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way request (no response future); forwarded to the generated
        // trait implementation.
        BufferCollectionProxyInterface::r#release(self)
    }
2940
    /// Set a name for VMOs in this buffer collection.
    ///
    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
    /// will be truncated to fit. The name of the vmo will be suffixed with the
    /// buffer index within the collection (if the suffix fits within
    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
    /// listed in the inspect data.
    ///
    /// The name only affects VMOs allocated after the name is set; this call
    /// does not rename existing VMOs. If multiple clients set different names
    /// then the larger priority value will win. Setting a new name with the
    /// same priority as a prior name doesn't change the name.
    ///
    /// All table fields are currently required.
    ///
    /// + request `priority` The name is only set if this is the first `SetName`
    /// or if `priority` is greater than any previous `priority` value in
    /// prior `SetName` calls across all `Node`(s) of this buffer collection.
    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#set_name(self, payload)
    }
2963
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    /// process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    /// (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#set_debug_client_info(self, payload)
    }
2999
    /// Sysmem logs a warning if sysmem hasn't seen
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
    /// within 5 seconds after creation of a new collection.
    ///
    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
    ///
    /// In most cases the default works well.
    ///
    /// All table fields are currently required.
    ///
    /// + request `deadline` The time at which sysmem will start trying to log
    /// the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
3020
    /// This enables verbose logging for the buffer collection.
    ///
    /// Verbose logging includes constraints set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
    /// the tree of `Node`(s).
    ///
    /// Normally sysmem prints only a single line complaint when aggregation
    /// fails, with just the specific detailed reason that aggregation failed,
    /// with little surrounding context. While this is often enough to diagnose
    /// a problem if only a small change was made and everything was working
    /// before the small change, it's often not particularly helpful for getting
    /// a new buffer collection to work for the first time. Especially with
    /// more complex trees of nodes, involving things like
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
    /// looks like and why it's failing a logical allocation, or why a tree or
    /// subtree is failing sooner than expected.
    ///
    /// The intent of the extra logging is to be acceptable from a performance
    /// point of view, under the assumption that verbose logging is only enabled
    /// on a low number of buffer collections. If we're not tracking down a bug,
    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#set_verbose_logging(self)
    }
3050
    /// This gets a handle that can be used as a parameter to
    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
    /// client obtained this handle from this `Node`.
    ///
    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
    /// despite the two calls typically being on different channels.
    ///
    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// All table fields are currently required.
    ///
    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
    /// different `Node` channel, to prove that the client obtained the handle
    /// from this `Node`.
    pub fn r#get_node_ref(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegates to the generated trait implementation, which
        // returns a future that resolves to the response.
        BufferCollectionProxyInterface::r#get_node_ref(self)
    }
3075
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem). Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    /// - true: The first parent node in common between the calling node and
    /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
    /// that the calling `Node` and the `node_ref` `Node` will not have both
    /// their constraints apply - rather sysmem will choose one or the other
    /// of the constraints - never both. This is because only one child of
    /// a `BufferCollectionTokenGroup` is selected during logical
    /// allocation, with only that one child's subtree contributing to
    /// constraints aggregation.
    /// - false: The first parent node in common between the calling `Node`
    /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    /// Currently, this means the first parent node in common is a
    /// `BufferCollectionToken` or `BufferCollection` (regardless of not
    /// `Release`ed). This means that the calling `Node` and the `node_ref`
    /// `Node` may have both their constraints apply during constraints
    /// aggregation of the logical allocation, if both `Node`(s) are
    /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    /// this case, there is no `BufferCollectionTokenGroup` that will
    /// directly prevent the two `Node`(s) from both being selected and
    /// their constraints both aggregated, but even when false, one or both
    /// `Node`(s) may still be eliminated from consideration if one or both
    /// `Node`(s) has a direct or indirect parent
    /// `BufferCollectionTokenGroup` which selects a child subtree other
    /// than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    /// associated with the same buffer collection as the calling `Node`.
    /// Another reason for this error is if the `node_ref` is an
    /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    /// a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    /// the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call. However,
    /// sysmem may add additional codes in future, so the client should have
    /// sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegates to the generated trait implementation, which
        // returns a future that resolves to the result.
        BufferCollectionProxyInterface::r#is_alternate_for(self, payload)
    }
3146
    /// Get the buffer collection ID. This ID is also available from
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
    /// within the collection).
    ///
    /// This call is mainly useful in situations where we can't convey a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
    /// handle, which can be joined back up with a `BufferCollection` client end
    /// that was created via a different path. Prefer to convey a
    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
    ///
    /// Trusting a `buffer_collection_id` value from a source other than sysmem
    /// is analogous to trusting a koid value from a source other than zircon.
    /// Both should be avoided unless really necessary, and both require
    /// caution. In some situations it may be reasonable to refer to a
    /// pre-established `BufferCollection` by `buffer_collection_id` via a
    /// protocol for efficiency reasons, but an incoming value purporting to be
    /// a `buffer_collection_id` is not sufficient alone to justify granting the
    /// sender of the `buffer_collection_id` any capability. The sender must
    /// first prove to a receiver that the sender has/had a VMO or has/had a
    /// `BufferCollectionToken` to the same collection by sending a handle that
    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
    /// `buffer_collection_id` value. The receiver should take care to avoid
    /// assuming that a sender had a `BufferCollectionToken` in cases where the
    /// sender has only proven that the sender had a VMO.
    ///
    /// - response `buffer_collection_id` This ID is unique per buffer
    /// collection per boot. Each buffer is uniquely identified by the
    /// `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegates to the generated trait implementation, which
        // returns a future that resolves to the response.
        BufferCollectionProxyInterface::r#get_buffer_collection_id(self)
    }
3184
    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
    /// created after this message to weak, which means that a client's `Node`
    /// client end (or a child created after this message) is not alone
    /// sufficient to keep allocated VMOs alive.
    ///
    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
    /// `close_weak_asap`.
    ///
    /// This message is only permitted before the `Node` becomes ready for
    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
    /// * `BufferCollectionToken`: any time
    /// * `BufferCollection`: before `SetConstraints`
    /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
    ///
    /// Currently, no conversion from strong `Node` to weak `Node` after ready
    /// for allocation is provided, but a client can simulate that by creating
    /// an additional `Node` before allocation and setting that additional
    /// `Node` to weak, and then potentially at some point later sending
    /// `Release` and closing the client end of the client's strong `Node`, but
    /// keeping the client's weak `Node`.
    ///
    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
    /// collection failure (all `Node` client end(s) will see
    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
    /// this situation until all `Node`(s) are ready for allocation. For initial
    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but after that client receives VMO handles, that
    /// client can `BufferCollection.Release` and close the client end without
    /// causing this type of failure.
    ///
    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
    /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#set_weak(self)
    }
3222
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    /// child nodes of this node created after this message plus all
    /// descendants of those nodes will behave as if `SetWeakOk` was sent on
    /// those nodes. Any child node of this node that was created before this
    /// message is not included. This setting is "sticky" in the sense that a
    /// subsequent `SetWeakOk` without this bool set to true does not reset
    /// the server-side bool. If this creates a problem for a participant, a
    /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    /// tokens instead, as appropriate. A participant should only set
    /// `for_child_nodes_also` true if the participant can really promise to
    /// obey `close_weak_asap` both for its own weak VMO handles, and for all
    /// weak VMO handles held by participants holding the corresponding child
    /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
    /// which are using sysmem(1) can be weak, despite the clients of those
    /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    /// direct way to find out about `close_weak_asap`. This only applies to
    /// descendents of this `Node` which are using sysmem(1), not to this
    /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
    /// token, which will fail allocation unless an ancestor of this `Node`
    /// specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#set_weak_ok(self, payload)
    }
3275
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#attach_node_tracking(self, payload)
    }
3305
    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
    /// collection.
    ///
    /// A participant may only call
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// For buffer allocation to be attempted, all holders of a
    /// `BufferCollection` client end need to call `SetConstraints` before
    /// sysmem will attempt to allocate buffers.
    ///
    /// + request `constraints` These are the constraints on the buffer
    /// collection imposed by the sending client/participant. The
    /// `constraints` field is not required to be set. If not set, the client
    /// is not setting any actual constraints, but is indicating that the
    /// client has no constraints to set. A client that doesn't set the
    /// `constraints` field won't receive any VMO handles, but can still find
    /// out how many buffers were allocated and can still refer to buffers by
    /// their `buffer_index`.
    pub fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        // One-way request; forwarded to the generated trait implementation.
        BufferCollectionProxyInterface::r#set_constraints(self, payload)
    }
3331
    /// Wait until all buffers are allocated.
    ///
    /// This FIDL call completes when buffers have been allocated, or completes
    /// with some failure detail if allocation has been attempted but failed.
    ///
    /// The following must occur before buffers will be allocated:
    /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
    /// collection must be turned in via `BindSharedCollection` to get a
    /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
    /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
    /// to them.
    /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
    /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
    /// sent to them.
    ///
    /// - result `buffer_collection_info` The VMO handles and other related
    /// info.
    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
    /// cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
    /// malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
    /// request is valid but cannot be satisfied, perhaps due to hardware
    /// limitations. This can happen if participants have incompatible
    /// constraints (empty intersection, roughly speaking). See the log for
    /// more info. In cases where a participant could potentially be treated
    /// as optional, see [`BufferCollectionTokenGroup`]. When using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
    /// error code if there aren't enough buffers in the pre-existing
    /// collection to satisfy the constraints set on the attached token and
    /// any sub-tree of tokens derived from the attached token.
    pub fn r#wait_for_all_buffers_allocated(
        &self,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: delegates to the generated trait implementation, which
        // returns a future that resolves to the allocation result.
        BufferCollectionProxyInterface::r#wait_for_all_buffers_allocated(self)
    }
3373
3374 /// Checks whether all the buffers have been allocated, in a polling
3375 /// fashion.
3376 ///
3377 /// * If the buffer collection has been allocated, returns success.
3378 /// * If the buffer collection failed allocation, returns the same
3379 /// [`fuchsia.sysmem2/Error`] as
3380 /// [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
3381 /// return.
3382 /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
3383 /// attempted allocation yet. This means that WaitForAllBuffersAllocated
3384 /// would not respond quickly.
3385 pub fn r#check_all_buffers_allocated(
3386 &self,
3387 ) -> fidl::client::QueryResponseFut<
3388 BufferCollectionCheckAllBuffersAllocatedResult,
3389 fidl::encoding::DefaultFuchsiaResourceDialect,
3390 > {
3391 BufferCollectionProxyInterface::r#check_all_buffers_allocated(self)
3392 }
3393
3394 /// Create a new token to add a new participant to an existing logical
3395 /// buffer collection, if the existing collection's buffer counts,
3396 /// constraints, and participants allow.
3397 ///
3398 /// This can be useful in replacing a failed participant, and/or in
3399 /// adding/re-adding a participant after buffers have already been
3400 /// allocated.
3401 ///
3402 /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
3403 /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
3404 /// goes through the normal procedure of setting constraints or closing
3405 /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
3406 /// clients' point of view, despite the possibility that all the buffers
3407 /// were actually allocated previously. This process is called "logical
3408 /// allocation". Most instances of "allocation" in docs for other messages
3409 /// can also be read as "allocation or logical allocation" while remaining
3410 /// valid, but we just say "allocation" in most places for brevity/clarity
3411 /// of explanation, with the details of "logical allocation" left for the
3412 /// docs here on `AttachToken`.
3413 ///
3414 /// Failure of an attached `Node` does not propagate to the parent of the
3415 /// attached `Node`. More generally, failure of a child `Node` is blocked
3416 /// from reaching its parent `Node` if the child is attached, or if the
3417 /// child is dispensable and the failure occurred after logical allocation
3418 /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
3419 ///
3420 /// A participant may in some scenarios choose to initially use a
3421 /// dispensable token for a given instance of a delegate participant, and
3422 /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
3424 /// created with `AttachToken`.
3425 ///
3426 /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
3427 /// client end, the token acts like any other token. The client can
3428 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
3429 /// and can send the token to a different process/participant. The
3430 /// `BufferCollectionToken` `Node` should be converted to a
3431 /// `BufferCollection` `Node` as normal by sending
3432 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
3433 /// without causing subtree failure by sending
3434 /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
3435 /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
3436 /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
3437 /// the `BufferCollection`.
3438 ///
3439 /// Within the subtree, a success result from
3440 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
3441 /// the subtree participants' constraints were satisfiable using the
3442 /// already-existing buffer collection, the already-established
3443 /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
3444 /// constraints, and the already-existing other participants (already added
3445 /// via successful logical allocation) and their specified buffer counts in
3446 /// their constraints. A failure result means the new participants'
3447 /// constraints cannot be satisfied using the existing buffer collection and
3448 /// its already-added participants. Creating a new collection instead may
3449 /// allow all participants' constraints to be satisfied, assuming
3450 /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
3451 /// used.
3452 ///
3453 /// A token created with `AttachToken` performs constraints aggregation with
3454 /// all constraints currently in effect on the buffer collection, plus the
3455 /// attached token under consideration plus child tokens under the attached
3456 /// token which are not themselves an attached token or under such a token.
3457 /// Further subtrees under this subtree are considered for logical
3458 /// allocation only after this subtree has completed logical allocation.
3459 ///
3460 /// Assignment of existing buffers to participants'
3461 /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
3462 /// etc is first-come first-served, but a child can't logically allocate
3463 /// before all its parents have sent `SetConstraints`.
3464 ///
3465 /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
3466 /// in contrast to `AttachToken`, has the created token `Node` + child
3467 /// `Node`(s) (in the created subtree but not in any subtree under this
3468 /// subtree) participate in constraints aggregation along with its parent
3469 /// during the parent's allocation or logical allocation.
3470 ///
3471 /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
3472 /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
3473 /// sysmem before the new token can be passed to `BindSharedCollection`. The
3474 /// `Sync` of the new token can be accomplished with
3475 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
3476 /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
3477 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
3478 /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
3479 /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
3480 /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
3481 /// created token, to also sync those additional tokens to sysmem using a
3482 /// single round-trip.
3483 ///
3484 /// All table fields are currently required.
3485 ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
3487 /// rights of the subtree. These values for `rights_attenuation_mask`
3488 /// result in no attenuation (note that 0 is not on this list):
3489 /// + ZX_RIGHT_SAME_RIGHTS (preferred)
3490 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
3491 /// + request `token_request` The server end of the `BufferCollectionToken`
3492 /// channel. The client retains the client end.
3493 pub fn r#attach_token(
3494 &self,
3495 mut payload: BufferCollectionAttachTokenRequest,
3496 ) -> Result<(), fidl::Error> {
3497 BufferCollectionProxyInterface::r#attach_token(self, payload)
3498 }
3499
3500 /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
3501 /// buffers have been allocated and only the specified number of buffers (or
3502 /// fewer) remain in the buffer collection.
3503 ///
3504 /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
3505 /// client to wait until an old buffer collection is fully or mostly
3506 /// deallocated before attempting allocation of a new buffer collection. The
3507 /// eventpair is only signalled when the buffers of this collection have
3508 /// been fully deallocated (not just un-referenced by clients, but all the
3509 /// memory consumed by those buffers has been fully reclaimed/recycled), or
3510 /// when allocation or logical allocation fails for the tree or subtree
3511 /// including this [`fuchsia.sysmem2/BufferCollection`].
3512 ///
3513 /// The eventpair won't be signalled until allocation or logical allocation
3514 /// has completed; until then, the collection's current buffer count is
3515 /// ignored.
3516 ///
3517 /// If logical allocation fails for an attached subtree (using
3518 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
3519 /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
3521 /// for logical allocation consistency with normal allocation.
3522 ///
3523 /// The lifetime signalled by this event includes asynchronous cleanup of
3524 /// allocated buffers, and this asynchronous cleanup cannot occur until all
3525 /// holders of VMO handles to the buffers have closed those VMO handles.
3526 /// Therefore, clients should take care not to become blocked forever
3527 /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
3528 /// participants using the logical buffer collection (including the waiter
3529 /// itself) are less trusted, less reliable, or potentially blocked by the
3530 /// wait itself. Waiting asynchronously is recommended. Setting a deadline
3531 /// for the client wait may be prudent, depending on details of how the
3532 /// collection and/or its VMOs are used or shared. Failure to allocate a
3533 /// new/replacement buffer collection is better than getting stuck forever.
3534 ///
3535 /// The sysmem server itself intentionally does not perform any waiting on
3536 /// already-failed collections' VMOs to finish cleaning up before attempting
3537 /// a new allocation, and the sysmem server intentionally doesn't retry
3538 /// allocation if a new allocation fails due to out of memory, even if that
3539 /// failure is potentially due to continued existence of an old collection's
3540 /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
3541 /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
3542 /// as long as the waiting client is careful to not create a deadlock.
3543 ///
3544 /// Continued existence of old collections that are still cleaning up is not
3545 /// the only reason that a new allocation may fail due to insufficient
3546 /// memory, even if the new allocation is allocating physically contiguous
3547 /// buffers. Overall system memory pressure can also be the cause of failure
3548 /// to allocate a new collection. See also
3549 /// [`fuchsia.memorypressure/Provider`].
3550 ///
3551 /// `AttachLifetimeTracking` is meant to be compatible with other protocols
3552 /// with a similar `AttachLifetimeTracking` message; duplicates of the same
3553 /// `eventpair` handle (server end) can be sent via more than one
3554 /// `AttachLifetimeTracking` message to different protocols, and the
3555 /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
3556 /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
3558 /// client end can (also) be duplicated without preventing the
3559 /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
3560 ///
3561 /// The server intentionally doesn't "trust" any signals set on the
3562 /// `server_end`. This mechanism intentionally uses only
3563 /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
3564 /// "early", and is only set when all handles to the server end eventpair
3565 /// are closed. No meaning is associated with any of the other signals, and
3566 /// clients should ignore any other signal bits on either end of the
3567 /// `eventpair`.
3568 ///
3569 /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
3570 /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
3571 /// transfer without causing `BufferCollection` channel failure).
3572 ///
3573 /// All table fields are currently required.
3574 ///
3575 /// + request `server_end` This eventpair handle will be closed by the
3576 /// sysmem server when buffers have been allocated initially and the
3577 /// number of buffers is then less than or equal to `buffers_remaining`.
3578 /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
3579 /// fewer) buffers to be fully deallocated. A number greater than zero can
3580 /// be useful in situations where a known number of buffers are
3581 /// intentionally not closed so that the data can continue to be used,
3582 /// such as for keeping the last available video frame displayed in the UI
3583 /// even if the video stream was using protected output buffers. It's
3584 /// outside the scope of the `BufferCollection` interface (at least for
3585 /// now) to determine how many buffers may be held without closing, but
3586 /// it'll typically be in the range 0-2.
3587 pub fn r#attach_lifetime_tracking(
3588 &self,
3589 mut payload: BufferCollectionAttachLifetimeTrackingRequest,
3590 ) -> Result<(), fidl::Error> {
3591 BufferCollectionProxyInterface::r#attach_lifetime_tracking(self, payload)
3592 }
3593}
3594
// Wire-level dispatch of each FIDL method on the proxy's underlying `Client`:
// one-way methods use `send`, two-way methods use `send_query_and_decode` with
// a local `_decode` helper that decodes the flexible response envelope. Each
// hex literal is the method's FIDL ordinal; the same values appear in the
// server-side request-stream dispatcher, and they must stay in agreement.
impl BufferCollectionProxyInterface for BufferCollectionProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the empty flexible response for Sync. `into_result` maps a
        // flexible "unknown method" reply to an error for this protocol.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way: no response is expected for Release.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Two-way with a result union: decoded as Result<Response, Error>.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // Resource payloads (handles inside) are sent via `&mut` so the encoder
    // can take ownership of the handles.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_constraints(
        &self,
        mut payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionSetConstraintsRequest>(
            &mut payload,
            0x1fde0f19d650197b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type WaitForAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    BufferCollectionWaitForAllBuffersAllocatedResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x62300344b61404e,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionWaitForAllBuffersAllocatedResult,
        >(
            (),
            0x62300344b61404e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type CheckAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut {
        // Success carries no payload (empty struct); only the error side of the
        // result union is meaningful.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35a5fe77ce939c10,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionCheckAllBuffersAllocatedResult,
        >(
            (),
            0x35a5fe77ce939c10,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachTokenRequest>(
            &mut payload,
            0x46ac7d0008492982,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
            &mut payload,
            0x3ecb510113116dcf,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
3859
/// A stream of events arriving on a fuchsia.sysmem2/BufferCollection channel.
pub struct BufferCollectionEventStream {
    // Raw event messages from the channel; decoded into
    // [`BufferCollectionEvent`] values by this type's `Stream` impl.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
3863
// Marking the stream `Unpin` lets it be polled via `poll_next_unpin` without
// structural pinning (see the `Stream` impl, which relies on this).
impl std::marker::Unpin for BufferCollectionEventStream {}
3865
impl futures::stream::FusedStream for BufferCollectionEventStream {
    // Terminated exactly when the underlying event receiver reports
    // termination; no state of our own to track.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
3871
3872impl futures::Stream for BufferCollectionEventStream {
3873 type Item = Result<BufferCollectionEvent, fidl::Error>;
3874
3875 fn poll_next(
3876 mut self: std::pin::Pin<&mut Self>,
3877 cx: &mut std::task::Context<'_>,
3878 ) -> std::task::Poll<Option<Self::Item>> {
3879 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
3880 &mut self.event_receiver,
3881 cx
3882 )?) {
3883 Some(buf) => std::task::Poll::Ready(Some(BufferCollectionEvent::decode(buf))),
3884 None => std::task::Poll::Ready(None),
3885 }
3886 }
3887}
3888
#[derive(Debug)]
pub enum BufferCollectionEvent {
    /// Catch-all for events whose ordinal is not recognized; such events are
    /// surfaced (rather than treated as an error) when the sender marked the
    /// message with the FLEXIBLE dynamic flag — see `decode` below.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
3897
3898impl BufferCollectionEvent {
3899 /// Decodes a message buffer as a [`BufferCollectionEvent`].
3900 fn decode(
3901 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
3902 ) -> Result<BufferCollectionEvent, fidl::Error> {
3903 let (bytes, _handles) = buf.split_mut();
3904 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
3905 debug_assert_eq!(tx_header.tx_id, 0);
3906 match tx_header.ordinal {
3907 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
3908 Ok(BufferCollectionEvent::_UnknownEvent { ordinal: tx_header.ordinal })
3909 }
3910 _ => Err(fidl::Error::UnknownOrdinal {
3911 ordinal: tx_header.ordinal,
3912 protocol_name:
3913 <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
3914 }),
3915 }
3916 }
3917}
3918
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionRequestStream {
    // Shared server-side channel state; also cloned into each
    // `BufferCollectionControlHandle` handed out with requests.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shutdown is observed; `poll_next` panics
    // if the stream is polled again after that.
    is_terminated: bool,
}
3924
// The stream's fields (an `Arc` and a `bool`) carry no self-references, so it
// is safe to poll through `Pin<&mut Self>` without structural pinning.
impl std::marker::Unpin for BufferCollectionRequestStream {}
3926
impl futures::stream::FusedStream for BufferCollectionRequestStream {
    // Mirrors the flag `poll_next` sets when the channel closes or is shut
    // down; once true, the stream yields no further requests.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
3932
3933impl fidl::endpoints::RequestStream for BufferCollectionRequestStream {
3934 type Protocol = BufferCollectionMarker;
3935 type ControlHandle = BufferCollectionControlHandle;
3936
3937 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
3938 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
3939 }
3940
3941 fn control_handle(&self) -> Self::ControlHandle {
3942 BufferCollectionControlHandle { inner: self.inner.clone() }
3943 }
3944
3945 fn into_inner(
3946 self,
3947 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
3948 {
3949 (self.inner, self.is_terminated)
3950 }
3951
3952 fn from_inner(
3953 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
3954 is_terminated: bool,
3955 ) -> Self {
3956 Self { inner, is_terminated }
3957 }
3958}
3959
3960impl futures::Stream for BufferCollectionRequestStream {
3961 type Item = Result<BufferCollectionRequest, fidl::Error>;
3962
3963 fn poll_next(
3964 mut self: std::pin::Pin<&mut Self>,
3965 cx: &mut std::task::Context<'_>,
3966 ) -> std::task::Poll<Option<Self::Item>> {
3967 let this = &mut *self;
3968 if this.inner.check_shutdown(cx) {
3969 this.is_terminated = true;
3970 return std::task::Poll::Ready(None);
3971 }
3972 if this.is_terminated {
3973 panic!("polled BufferCollectionRequestStream after completion");
3974 }
3975 fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
3976 |bytes, handles| {
3977 match this.inner.channel().read_etc(cx, bytes, handles) {
3978 std::task::Poll::Ready(Ok(())) => {}
3979 std::task::Poll::Pending => return std::task::Poll::Pending,
3980 std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
3981 this.is_terminated = true;
3982 return std::task::Poll::Ready(None);
3983 }
3984 std::task::Poll::Ready(Err(e)) => {
3985 return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
3986 e.into(),
3987 ))))
3988 }
3989 }
3990
3991 // A message has been received from the channel
3992 let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
3993
3994 std::task::Poll::Ready(Some(match header.ordinal {
3995 0x11ac2555cf575b54 => {
3996 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
3997 let mut req = fidl::new_empty!(
3998 fidl::encoding::EmptyPayload,
3999 fidl::encoding::DefaultFuchsiaResourceDialect
4000 );
4001 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4002 let control_handle =
4003 BufferCollectionControlHandle { inner: this.inner.clone() };
4004 Ok(BufferCollectionRequest::Sync {
4005 responder: BufferCollectionSyncResponder {
4006 control_handle: std::mem::ManuallyDrop::new(control_handle),
4007 tx_id: header.tx_id,
4008 },
4009 })
4010 }
4011 0x6a5cae7d6d6e04c6 => {
4012 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4013 let mut req = fidl::new_empty!(
4014 fidl::encoding::EmptyPayload,
4015 fidl::encoding::DefaultFuchsiaResourceDialect
4016 );
4017 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4018 let control_handle =
4019 BufferCollectionControlHandle { inner: this.inner.clone() };
4020 Ok(BufferCollectionRequest::Release { control_handle })
4021 }
4022 0xb41f1624f48c1e9 => {
4023 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4024 let mut req = fidl::new_empty!(
4025 NodeSetNameRequest,
4026 fidl::encoding::DefaultFuchsiaResourceDialect
4027 );
4028 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
4029 let control_handle =
4030 BufferCollectionControlHandle { inner: this.inner.clone() };
4031 Ok(BufferCollectionRequest::SetName { payload: req, control_handle })
4032 }
4033 0x5cde8914608d99b1 => {
4034 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4035 let mut req = fidl::new_empty!(
4036 NodeSetDebugClientInfoRequest,
4037 fidl::encoding::DefaultFuchsiaResourceDialect
4038 );
4039 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
4040 let control_handle =
4041 BufferCollectionControlHandle { inner: this.inner.clone() };
4042 Ok(BufferCollectionRequest::SetDebugClientInfo {
4043 payload: req,
4044 control_handle,
4045 })
4046 }
4047 0x716b0af13d5c0806 => {
4048 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4049 let mut req = fidl::new_empty!(
4050 NodeSetDebugTimeoutLogDeadlineRequest,
4051 fidl::encoding::DefaultFuchsiaResourceDialect
4052 );
4053 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
4054 let control_handle =
4055 BufferCollectionControlHandle { inner: this.inner.clone() };
4056 Ok(BufferCollectionRequest::SetDebugTimeoutLogDeadline {
4057 payload: req,
4058 control_handle,
4059 })
4060 }
4061 0x5209c77415b4dfad => {
4062 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4063 let mut req = fidl::new_empty!(
4064 fidl::encoding::EmptyPayload,
4065 fidl::encoding::DefaultFuchsiaResourceDialect
4066 );
4067 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4068 let control_handle =
4069 BufferCollectionControlHandle { inner: this.inner.clone() };
4070 Ok(BufferCollectionRequest::SetVerboseLogging { control_handle })
4071 }
4072 0x5b3d0e51614df053 => {
4073 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4074 let mut req = fidl::new_empty!(
4075 fidl::encoding::EmptyPayload,
4076 fidl::encoding::DefaultFuchsiaResourceDialect
4077 );
4078 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4079 let control_handle =
4080 BufferCollectionControlHandle { inner: this.inner.clone() };
4081 Ok(BufferCollectionRequest::GetNodeRef {
4082 responder: BufferCollectionGetNodeRefResponder {
4083 control_handle: std::mem::ManuallyDrop::new(control_handle),
4084 tx_id: header.tx_id,
4085 },
4086 })
4087 }
4088 0x3a58e00157e0825 => {
4089 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4090 let mut req = fidl::new_empty!(
4091 NodeIsAlternateForRequest,
4092 fidl::encoding::DefaultFuchsiaResourceDialect
4093 );
4094 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
4095 let control_handle =
4096 BufferCollectionControlHandle { inner: this.inner.clone() };
4097 Ok(BufferCollectionRequest::IsAlternateFor {
4098 payload: req,
4099 responder: BufferCollectionIsAlternateForResponder {
4100 control_handle: std::mem::ManuallyDrop::new(control_handle),
4101 tx_id: header.tx_id,
4102 },
4103 })
4104 }
4105 0x77d19a494b78ba8c => {
4106 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4107 let mut req = fidl::new_empty!(
4108 fidl::encoding::EmptyPayload,
4109 fidl::encoding::DefaultFuchsiaResourceDialect
4110 );
4111 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4112 let control_handle =
4113 BufferCollectionControlHandle { inner: this.inner.clone() };
4114 Ok(BufferCollectionRequest::GetBufferCollectionId {
4115 responder: BufferCollectionGetBufferCollectionIdResponder {
4116 control_handle: std::mem::ManuallyDrop::new(control_handle),
4117 tx_id: header.tx_id,
4118 },
4119 })
4120 }
4121 0x22dd3ea514eeffe1 => {
4122 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4123 let mut req = fidl::new_empty!(
4124 fidl::encoding::EmptyPayload,
4125 fidl::encoding::DefaultFuchsiaResourceDialect
4126 );
4127 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4128 let control_handle =
4129 BufferCollectionControlHandle { inner: this.inner.clone() };
4130 Ok(BufferCollectionRequest::SetWeak { control_handle })
4131 }
4132 0x38a44fc4d7724be9 => {
4133 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4134 let mut req = fidl::new_empty!(
4135 NodeSetWeakOkRequest,
4136 fidl::encoding::DefaultFuchsiaResourceDialect
4137 );
4138 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
4139 let control_handle =
4140 BufferCollectionControlHandle { inner: this.inner.clone() };
4141 Ok(BufferCollectionRequest::SetWeakOk { payload: req, control_handle })
4142 }
4143 0x3f22f2a293d3cdac => {
4144 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4145 let mut req = fidl::new_empty!(
4146 NodeAttachNodeTrackingRequest,
4147 fidl::encoding::DefaultFuchsiaResourceDialect
4148 );
4149 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4150 let control_handle =
4151 BufferCollectionControlHandle { inner: this.inner.clone() };
4152 Ok(BufferCollectionRequest::AttachNodeTracking {
4153 payload: req,
4154 control_handle,
4155 })
4156 }
4157 0x1fde0f19d650197b => {
4158 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4159 let mut req = fidl::new_empty!(
4160 BufferCollectionSetConstraintsRequest,
4161 fidl::encoding::DefaultFuchsiaResourceDialect
4162 );
4163 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionSetConstraintsRequest>(&header, _body_bytes, handles, &mut req)?;
4164 let control_handle =
4165 BufferCollectionControlHandle { inner: this.inner.clone() };
4166 Ok(BufferCollectionRequest::SetConstraints { payload: req, control_handle })
4167 }
4168 0x62300344b61404e => {
4169 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4170 let mut req = fidl::new_empty!(
4171 fidl::encoding::EmptyPayload,
4172 fidl::encoding::DefaultFuchsiaResourceDialect
4173 );
4174 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4175 let control_handle =
4176 BufferCollectionControlHandle { inner: this.inner.clone() };
4177 Ok(BufferCollectionRequest::WaitForAllBuffersAllocated {
4178 responder: BufferCollectionWaitForAllBuffersAllocatedResponder {
4179 control_handle: std::mem::ManuallyDrop::new(control_handle),
4180 tx_id: header.tx_id,
4181 },
4182 })
4183 }
4184 0x35a5fe77ce939c10 => {
4185 header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
4186 let mut req = fidl::new_empty!(
4187 fidl::encoding::EmptyPayload,
4188 fidl::encoding::DefaultFuchsiaResourceDialect
4189 );
4190 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
4191 let control_handle =
4192 BufferCollectionControlHandle { inner: this.inner.clone() };
4193 Ok(BufferCollectionRequest::CheckAllBuffersAllocated {
4194 responder: BufferCollectionCheckAllBuffersAllocatedResponder {
4195 control_handle: std::mem::ManuallyDrop::new(control_handle),
4196 tx_id: header.tx_id,
4197 },
4198 })
4199 }
4200 0x46ac7d0008492982 => {
4201 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4202 let mut req = fidl::new_empty!(
4203 BufferCollectionAttachTokenRequest,
4204 fidl::encoding::DefaultFuchsiaResourceDialect
4205 );
4206 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachTokenRequest>(&header, _body_bytes, handles, &mut req)?;
4207 let control_handle =
4208 BufferCollectionControlHandle { inner: this.inner.clone() };
4209 Ok(BufferCollectionRequest::AttachToken { payload: req, control_handle })
4210 }
4211 0x3ecb510113116dcf => {
4212 header.validate_request_tx_id(fidl::MethodType::OneWay)?;
4213 let mut req = fidl::new_empty!(
4214 BufferCollectionAttachLifetimeTrackingRequest,
4215 fidl::encoding::DefaultFuchsiaResourceDialect
4216 );
4217 fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachLifetimeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
4218 let control_handle =
4219 BufferCollectionControlHandle { inner: this.inner.clone() };
4220 Ok(BufferCollectionRequest::AttachLifetimeTracking {
4221 payload: req,
4222 control_handle,
4223 })
4224 }
4225 _ if header.tx_id == 0
4226 && header
4227 .dynamic_flags()
4228 .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4229 {
4230 Ok(BufferCollectionRequest::_UnknownMethod {
4231 ordinal: header.ordinal,
4232 control_handle: BufferCollectionControlHandle {
4233 inner: this.inner.clone(),
4234 },
4235 method_type: fidl::MethodType::OneWay,
4236 })
4237 }
4238 _ if header
4239 .dynamic_flags()
4240 .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
4241 {
4242 this.inner.send_framework_err(
4243 fidl::encoding::FrameworkErr::UnknownMethod,
4244 header.tx_id,
4245 header.ordinal,
4246 header.dynamic_flags(),
4247 (bytes, handles),
4248 )?;
4249 Ok(BufferCollectionRequest::_UnknownMethod {
4250 ordinal: header.ordinal,
4251 control_handle: BufferCollectionControlHandle {
4252 inner: this.inner.clone(),
4253 },
4254 method_type: fidl::MethodType::TwoWay,
4255 })
4256 }
4257 _ => Err(fidl::Error::UnknownOrdinal {
4258 ordinal: header.ordinal,
4259 protocol_name:
4260 <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
4261 }),
4262 }))
4263 },
4264 )
4265 }
4266}
4267
4268/// [`fuchsia.sysmem2/BufferCollection`] is a connection directly from a
4269/// participant to sysmem re. a buffer collection; often the buffer collection
4270/// is shared with other participants which have their own `BufferCollection`
4271/// client end(s) associated with the same buffer collection. In other words,
4272/// an instance of the `BufferCollection` interface is a view of a buffer
4273/// collection, not the buffer collection itself.
4274///
4275/// The `BufferCollection` connection exists to facilitate async indication of
4276/// when the buffer collection has been populated with buffers.
4277///
4278/// Also, the channel's closure by the sysmem server is an indication to the
4279/// client that the client should close all VMO handles that were obtained from
4280/// the `BufferCollection` ASAP.
4281///
4282/// Some buffer collections can use enough memory that it can be worth avoiding
4283/// allocation overlap (in time) using
4284/// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] so that the
4285/// initiator can tell when enough buffers of the buffer collection have been
4286/// fully deallocated prior to the initiator allocating a new buffer collection.
4287///
4288/// Epitaphs are not used in this protocol.
4289#[derive(Debug)]
4290pub enum BufferCollectionRequest {
4291 /// Ensure that previous messages have been received server side. This is
4292 /// particularly useful after previous messages that created new tokens,
4293 /// because a token must be known to the sysmem server before sending the
4294 /// token to another participant.
4295 ///
4296 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
4297 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
4298 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
4299 /// to mitigate the possibility of a hostile/fake
4300 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
4301 /// Another way is to pass the token to
4302 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
4303 /// the token as part of exchanging it for a
4304 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
4305 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
4306 /// of stalling.
4307 ///
4308 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
4309 /// and then starting and completing a `Sync`, it's then safe to send the
4310 /// `BufferCollectionToken` client ends to other participants knowing the
4311 /// server will recognize the tokens when they're sent by the other
4312 /// participants to sysmem in a
4313 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
4314 /// efficient way to create tokens while avoiding unnecessary round trips.
4315 ///
4316 /// Other options include waiting for each
4317 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
4318 /// individually (using separate call to `Sync` after each), or calling
4319 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
4320 /// converted to a `BufferCollection` via
4321 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
4322 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
4323 /// the sync step and can create multiple tokens at once.
4324 Sync { responder: BufferCollectionSyncResponder },
4325 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
4326 ///
4327 /// Normally a participant will convert a `BufferCollectionToken` into a
4328 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
4329 /// `Release` via the token (and then close the channel immediately or
4330 /// shortly later in response to server closing the server end), which
4331 /// avoids causing buffer collection failure. Without a prior `Release`,
4332 /// closing the `BufferCollectionToken` client end will cause buffer
4333 /// collection failure.
4334 ///
4335 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
4336 ///
4337 /// By default the server handles unexpected closure of a
4338 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
4339 /// first) by failing the buffer collection. Partly this is to expedite
4340 /// closing VMO handles to reclaim memory when any participant fails. If a
4341 /// participant would like to cleanly close a `BufferCollection` without
4342 /// causing buffer collection failure, the participant can send `Release`
4343 /// before closing the `BufferCollection` client end. The `Release` can
4344 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
4345 /// buffer collection won't require constraints from this node in order to
4346 /// allocate. If after `SetConstraints`, the constraints are retained and
4347 /// aggregated, despite the lack of `BufferCollection` connection at the
4348 /// time of constraints aggregation.
4349 ///
4350 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
4351 ///
4352 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
4353 /// end (without `Release` first) will trigger failure of the buffer
4354 /// collection. To close a `BufferCollectionTokenGroup` channel without
4355 /// failing the buffer collection, ensure that AllChildrenPresent() has been
4356 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
4357 /// client end.
4358 ///
4359 /// If `Release` occurs before
4360    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
4361 /// buffer collection will fail (triggered by reception of `Release` without
4362 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
4363 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
4364 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
4365 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
4366 /// close requires `AllChildrenPresent` (if not already sent), then
4367 /// `Release`, then close client end.
4368 ///
4369 /// If `Release` occurs after `AllChildrenPresent`, the children and all
4370 /// their constraints remain intact (just as they would if the
4371 /// `BufferCollectionTokenGroup` channel had remained open), and the client
4372 /// end close doesn't trigger buffer collection failure.
4373 ///
4374 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
4375 ///
4376 /// For brevity, the per-channel-protocol paragraphs above ignore the
4377 /// separate failure domain created by
4378 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
4379 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
4380 /// unexpectedly closes (without `Release` first) and that client end is
4381 /// under a failure domain, instead of failing the whole buffer collection,
4382 /// the failure domain is failed, but the buffer collection itself is
4383 /// isolated from failure of the failure domain. Such failure domains can be
4384 /// nested, in which case only the inner-most failure domain in which the
4385 /// `Node` resides fails.
4386 Release { control_handle: BufferCollectionControlHandle },
4387 /// Set a name for VMOs in this buffer collection.
4388 ///
4389 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
4390 /// will be truncated to fit. The name of the vmo will be suffixed with the
4391 /// buffer index within the collection (if the suffix fits within
4392 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
4393 /// listed in the inspect data.
4394 ///
4395 /// The name only affects VMOs allocated after the name is set; this call
4396 /// does not rename existing VMOs. If multiple clients set different names
4397 /// then the larger priority value will win. Setting a new name with the
4398 /// same priority as a prior name doesn't change the name.
4399 ///
4400 /// All table fields are currently required.
4401 ///
4402 /// + request `priority` The name is only set if this is the first `SetName`
4403 /// or if `priority` is greater than any previous `priority` value in
4404 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
4405 /// + request `name` The name for VMOs created under this buffer collection.
4406 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionControlHandle },
4407 /// Set information about the current client that can be used by sysmem to
4408 /// help diagnose leaking memory and allocation stalls waiting for a
4409 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
4410 ///
4411 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
4412    /// `Node`(s) derived from this `Node`, unless overridden by
4413 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
4414 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
4415 ///
4416 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
4417 /// `Allocator` is the most efficient way to ensure that all
4418 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
4419 /// set, and is also more efficient than separately sending the same debug
4420 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
4421 /// created [`fuchsia.sysmem2/Node`].
4422 ///
4423 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
4424 /// indicate which client is closing their channel first, leading to subtree
4425 /// failure (which can be normal if the purpose of the subtree is over, but
4426 /// if happening earlier than expected, the client-channel-specific name can
4427 /// help diagnose where the failure is first coming from, from sysmem's
4428 /// point of view).
4429 ///
4430 /// All table fields are currently required.
4431 ///
4432 /// + request `name` This can be an arbitrary string, but the current
4433 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
4434 /// + request `id` This can be an arbitrary id, but the current process ID
4435 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
4436 SetDebugClientInfo {
4437 payload: NodeSetDebugClientInfoRequest,
4438 control_handle: BufferCollectionControlHandle,
4439 },
4440 /// Sysmem logs a warning if sysmem hasn't seen
4441 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
4442 /// within 5 seconds after creation of a new collection.
4443 ///
4444 /// Clients can call this method to change when the log is printed. If
4445    /// multiple clients set the deadline, it's unspecified which deadline will
4446 /// take effect.
4447 ///
4448 /// In most cases the default works well.
4449 ///
4450 /// All table fields are currently required.
4451 ///
4452 /// + request `deadline` The time at which sysmem will start trying to log
4453 /// the warning, unless all constraints are with sysmem by then.
4454 SetDebugTimeoutLogDeadline {
4455 payload: NodeSetDebugTimeoutLogDeadlineRequest,
4456 control_handle: BufferCollectionControlHandle,
4457 },
4458 /// This enables verbose logging for the buffer collection.
4459 ///
4460 /// Verbose logging includes constraints set via
4461 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
4462 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
4463 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
4464 /// the tree of `Node`(s).
4465 ///
4466 /// Normally sysmem prints only a single line complaint when aggregation
4467 /// fails, with just the specific detailed reason that aggregation failed,
4468 /// with little surrounding context. While this is often enough to diagnose
4469 /// a problem if only a small change was made and everything was working
4470 /// before the small change, it's often not particularly helpful for getting
4471 /// a new buffer collection to work for the first time. Especially with
4472 /// more complex trees of nodes, involving things like
4473 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
4474 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
4475 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
4476 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
4477 /// looks like and why it's failing a logical allocation, or why a tree or
4478 /// subtree is failing sooner than expected.
4479 ///
4480 /// The intent of the extra logging is to be acceptable from a performance
4481 /// point of view, under the assumption that verbose logging is only enabled
4482 /// on a low number of buffer collections. If we're not tracking down a bug,
4483 /// we shouldn't send this message.
4484 SetVerboseLogging { control_handle: BufferCollectionControlHandle },
4485 /// This gets a handle that can be used as a parameter to
4486 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
4487 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
4488 /// client obtained this handle from this `Node`.
4489 ///
4490 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
4491 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
4492 /// despite the two calls typically being on different channels.
4493 ///
4494 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
4495 ///
4496 /// All table fields are currently required.
4497 ///
4498 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
4499 /// different `Node` channel, to prove that the client obtained the handle
4500 /// from this `Node`.
4501 GetNodeRef { responder: BufferCollectionGetNodeRefResponder },
4502 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
4503 /// rooted at a different child token of a common parent
4504 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
4505 /// passed-in `node_ref`.
4506 ///
4507 /// This call is for assisting with admission control de-duplication, and
4508 /// with debugging.
4509 ///
4510 /// The `node_ref` must be obtained using
4511 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
4512 ///
4513 /// The `node_ref` can be a duplicated handle; it's not necessary to call
4514 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
4515 ///
4516 /// If a calling token may not actually be a valid token at all due to a
4517 /// potentially hostile/untrusted provider of the token, call
4518 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
4519 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
4520 /// never responds due to a calling token not being a real token (not really
4521 /// talking to sysmem). Another option is to call
4522 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
4523 /// which also validates the token along with converting it to a
4524 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
4525 ///
4526 /// All table fields are currently required.
4527 ///
4528 /// - response `is_alternate`
4529 /// - true: The first parent node in common between the calling node and
4530 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
4531 /// that the calling `Node` and the `node_ref` `Node` will not have both
4532 /// their constraints apply - rather sysmem will choose one or the other
4533 /// of the constraints - never both. This is because only one child of
4534 /// a `BufferCollectionTokenGroup` is selected during logical
4535 /// allocation, with only that one child's subtree contributing to
4536 /// constraints aggregation.
4537 /// - false: The first parent node in common between the calling `Node`
4538 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
4539 /// Currently, this means the first parent node in common is a
4540 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
4541 /// `Release`ed). This means that the calling `Node` and the `node_ref`
4542 /// `Node` may have both their constraints apply during constraints
4543 /// aggregation of the logical allocation, if both `Node`(s) are
4544 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
4545 /// this case, there is no `BufferCollectionTokenGroup` that will
4546 /// directly prevent the two `Node`(s) from both being selected and
4547 /// their constraints both aggregated, but even when false, one or both
4548 /// `Node`(s) may still be eliminated from consideration if one or both
4549 /// `Node`(s) has a direct or indirect parent
4550 /// `BufferCollectionTokenGroup` which selects a child subtree other
4551 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
4552 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
4553 /// associated with the same buffer collection as the calling `Node`.
4554 /// Another reason for this error is if the `node_ref` is an
4555 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
4556 /// a real `node_ref` obtained from `GetNodeRef`.
4557 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
4558    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
4559 /// the needed rights expected on a real `node_ref`.
4560 /// * No other failing status codes are returned by this call. However,
4561 /// sysmem may add additional codes in future, so the client should have
4562 /// sensible default handling for any failing status code.
4563 IsAlternateFor {
4564 payload: NodeIsAlternateForRequest,
4565 responder: BufferCollectionIsAlternateForResponder,
4566 },
4567 /// Get the buffer collection ID. This ID is also available from
4568 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
4569 /// within the collection).
4570 ///
4571 /// This call is mainly useful in situations where we can't convey a
4572 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
4573 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
4574 /// handle, which can be joined back up with a `BufferCollection` client end
4575 /// that was created via a different path. Prefer to convey a
4576 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
4577 ///
4578 /// Trusting a `buffer_collection_id` value from a source other than sysmem
4579 /// is analogous to trusting a koid value from a source other than zircon.
4580 /// Both should be avoided unless really necessary, and both require
4581 /// caution. In some situations it may be reasonable to refer to a
4582 /// pre-established `BufferCollection` by `buffer_collection_id` via a
4583 /// protocol for efficiency reasons, but an incoming value purporting to be
4584 /// a `buffer_collection_id` is not sufficient alone to justify granting the
4585 /// sender of the `buffer_collection_id` any capability. The sender must
4586 /// first prove to a receiver that the sender has/had a VMO or has/had a
4587 /// `BufferCollectionToken` to the same collection by sending a handle that
4588 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
4589 /// `buffer_collection_id` value. The receiver should take care to avoid
4590 /// assuming that a sender had a `BufferCollectionToken` in cases where the
4591 /// sender has only proven that the sender had a VMO.
4592 ///
4593 /// - response `buffer_collection_id` This ID is unique per buffer
4594 /// collection per boot. Each buffer is uniquely identified by the
4595 /// `buffer_collection_id` and `buffer_index` together.
4596 GetBufferCollectionId { responder: BufferCollectionGetBufferCollectionIdResponder },
4597 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
4598 /// created after this message to weak, which means that a client's `Node`
4599 /// client end (or a child created after this message) is not alone
4600 /// sufficient to keep allocated VMOs alive.
4601 ///
4602 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
4603 /// `close_weak_asap`.
4604 ///
4605 /// This message is only permitted before the `Node` becomes ready for
4606 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
4607 /// * `BufferCollectionToken`: any time
4608 /// * `BufferCollection`: before `SetConstraints`
4609 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
4610 ///
4611 /// Currently, no conversion from strong `Node` to weak `Node` after ready
4612 /// for allocation is provided, but a client can simulate that by creating
4613 /// an additional `Node` before allocation and setting that additional
4614 /// `Node` to weak, and then potentially at some point later sending
4615 /// `Release` and closing the client end of the client's strong `Node`, but
4616 /// keeping the client's weak `Node`.
4617 ///
4618 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
4619 /// collection failure (all `Node` client end(s) will see
4620 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
4621 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
4622 /// this situation until all `Node`(s) are ready for allocation. For initial
4623 /// allocation to succeed, at least one strong `Node` is required to exist
4624 /// at allocation time, but after that client receives VMO handles, that
4625 /// client can `BufferCollection.Release` and close the client end without
4626 /// causing this type of failure.
4627 ///
4628 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
4629 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
4630 /// separately as appropriate.
4631 SetWeak { control_handle: BufferCollectionControlHandle },
4632 /// This indicates to sysmem that the client is prepared to pay attention to
4633 /// `close_weak_asap`.
4634 ///
4635 /// If sent, this message must be before
4636 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
4637 ///
4638 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
4639 /// send this message before `WaitForAllBuffersAllocated`, or a parent
4640 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
4641 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
4642 /// trigger buffer collection failure.
4643 ///
4644 /// This message is necessary because weak sysmem VMOs have not always been
4645 /// a thing, so older clients are not aware of the need to pay attention to
4646 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
4647 /// sysmem weak VMO handles asap. By having this message and requiring
4648 /// participants to indicate their acceptance of this aspect of the overall
4649 /// protocol, we avoid situations where an older client is delivered a weak
4650 /// VMO without any way for sysmem to get that VMO to close quickly later
4651 /// (and on a per-buffer basis).
4652 ///
4653 /// A participant that doesn't handle `close_weak_asap` and also doesn't
4654 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
4655 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
4656 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
4657 /// same participant has a child/delegate which does retrieve VMOs, that
4658 /// child/delegate will need to send `SetWeakOk` before
4659 /// `WaitForAllBuffersAllocated`.
4660 ///
4661 /// + request `for_child_nodes_also` If present and true, this means direct
4662 /// child nodes of this node created after this message plus all
4663 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
4664 /// those nodes. Any child node of this node that was created before this
4665 /// message is not included. This setting is "sticky" in the sense that a
4666 /// subsequent `SetWeakOk` without this bool set to true does not reset
4667 /// the server-side bool. If this creates a problem for a participant, a
4668 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
4669 /// tokens instead, as appropriate. A participant should only set
4670 /// `for_child_nodes_also` true if the participant can really promise to
4671 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
4672 /// weak VMO handles held by participants holding the corresponding child
4673 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
4674 /// which are using sysmem(1) can be weak, despite the clients of those
4675 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
4676 /// direct way to find out about `close_weak_asap`. This only applies to
4677 /// descendents of this `Node` which are using sysmem(1), not to this
4678 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
4679 /// token, which will fail allocation unless an ancestor of this `Node`
4680 /// specified `for_child_nodes_also` true.
4681 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
4684 /// reservation by a different `Node` via
4685 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
4686 ///
4687 /// The `Node` buffer counts may not be released until the entire tree of
4688 /// `Node`(s) is closed or failed, because
4689 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
4690 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
4691 /// `Node` buffer counts remain reserved until the orphaned node is later
4692 /// cleaned up.
4693 ///
4694 /// If the `Node` exceeds a fairly large number of attached eventpair server
4695 /// ends, a log message will indicate this and the `Node` (and the
4696 /// appropriate) sub-tree will fail.
4697 ///
4698 /// The `server_end` will remain open when
4699 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
4700 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
4701 /// [`fuchsia.sysmem2/BufferCollection`].
4702 ///
4703 /// This message can also be used with a
4704 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
4705 AttachNodeTracking {
4706 payload: NodeAttachNodeTrackingRequest,
4707 control_handle: BufferCollectionControlHandle,
4708 },
4709 /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
4710 /// collection.
4711 ///
4712 /// A participant may only call
4713 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
4714 /// [`fuchsia.sysmem2/BufferCollection`].
4715 ///
4716 /// For buffer allocation to be attempted, all holders of a
4717 /// `BufferCollection` client end need to call `SetConstraints` before
4718 /// sysmem will attempt to allocate buffers.
4719 ///
4720 /// + request `constraints` These are the constraints on the buffer
4721 /// collection imposed by the sending client/participant. The
4722 /// `constraints` field is not required to be set. If not set, the client
4723 /// is not setting any actual constraints, but is indicating that the
4724 /// client has no constraints to set. A client that doesn't set the
4725 /// `constraints` field won't receive any VMO handles, but can still find
4726 /// out how many buffers were allocated and can still refer to buffers by
4727 /// their `buffer_index`.
4728 SetConstraints {
4729 payload: BufferCollectionSetConstraintsRequest,
4730 control_handle: BufferCollectionControlHandle,
4731 },
4732 /// Wait until all buffers are allocated.
4733 ///
4734 /// This FIDL call completes when buffers have been allocated, or completes
4735 /// with some failure detail if allocation has been attempted but failed.
4736 ///
4737 /// The following must occur before buffers will be allocated:
4738 /// * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
4739 /// collection must be turned in via `BindSharedCollection` to get a
4740 /// [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
4741 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
4742 /// or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
4743 /// to them.
4744 /// * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
4745 /// must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
4746 /// sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
4747 /// sent to them.
4748 ///
4749 /// - result `buffer_collection_info` The VMO handles and other related
4750 /// info.
4751 /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
4752 /// cannot be fulfilled due to resource exhaustion.
4753 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
4754 /// malformed.
4755 /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
4756 /// request is valid but cannot be satisfied, perhaps due to hardware
4757 /// limitations. This can happen if participants have incompatible
4758 /// constraints (empty intersection, roughly speaking). See the log for
4759 /// more info. In cases where a participant could potentially be treated
4760 /// as optional, see [`BufferCollectionTokenGroup`]. When using
4761 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
4762 /// error code if there aren't enough buffers in the pre-existing
4763 /// collection to satisfy the constraints set on the attached token and
4764 /// any sub-tree of tokens derived from the attached token.
4765 WaitForAllBuffersAllocated { responder: BufferCollectionWaitForAllBuffersAllocatedResponder },
4766 /// Checks whether all the buffers have been allocated, in a polling
4767 /// fashion.
4768 ///
4769 /// * If the buffer collection has been allocated, returns success.
4770 /// * If the buffer collection failed allocation, returns the same
4771 /// [`fuchsia.sysmem2/Error`] as
4772 /// [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
4773 /// return.
4774 /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
4775 /// attempted allocation yet. This means that WaitForAllBuffersAllocated
4776 /// would not respond quickly.
4777 CheckAllBuffersAllocated { responder: BufferCollectionCheckAllBuffersAllocatedResponder },
4778 /// Create a new token to add a new participant to an existing logical
4779 /// buffer collection, if the existing collection's buffer counts,
4780 /// constraints, and participants allow.
4781 ///
4782 /// This can be useful in replacing a failed participant, and/or in
4783 /// adding/re-adding a participant after buffers have already been
4784 /// allocated.
4785 ///
4786 /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
4787 /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
4788 /// goes through the normal procedure of setting constraints or closing
4789 /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
4790 /// clients' point of view, despite the possibility that all the buffers
4791 /// were actually allocated previously. This process is called "logical
4792 /// allocation". Most instances of "allocation" in docs for other messages
4793 /// can also be read as "allocation or logical allocation" while remaining
4794 /// valid, but we just say "allocation" in most places for brevity/clarity
4795 /// of explanation, with the details of "logical allocation" left for the
4796 /// docs here on `AttachToken`.
4797 ///
4798 /// Failure of an attached `Node` does not propagate to the parent of the
4799 /// attached `Node`. More generally, failure of a child `Node` is blocked
4800 /// from reaching its parent `Node` if the child is attached, or if the
4801 /// child is dispensable and the failure occurred after logical allocation
4802 /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
4803 ///
4804 /// A participant may in some scenarios choose to initially use a
4805 /// dispensable token for a given instance of a delegate participant, and
4806 /// then later if the first instance of that delegate participant fails, a
4807 /// new second instance of that delegate participant my be given a token
4808 /// created with `AttachToken`.
4809 ///
4810 /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
4811 /// client end, the token acts like any other token. The client can
4812 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
4813 /// and can send the token to a different process/participant. The
4814 /// `BufferCollectionToken` `Node` should be converted to a
4815 /// `BufferCollection` `Node` as normal by sending
4816 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
4817 /// without causing subtree failure by sending
4818 /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
4819 /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
4820 /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
4821 /// the `BufferCollection`.
4822 ///
4823 /// Within the subtree, a success result from
4824 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
4825 /// the subtree participants' constraints were satisfiable using the
4826 /// already-existing buffer collection, the already-established
4827 /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
4828 /// constraints, and the already-existing other participants (already added
4829 /// via successful logical allocation) and their specified buffer counts in
4830 /// their constraints. A failure result means the new participants'
4831 /// constraints cannot be satisfied using the existing buffer collection and
4832 /// its already-added participants. Creating a new collection instead may
4833 /// allow all participants' constraints to be satisfied, assuming
4834 /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
4835 /// used.
4836 ///
4837 /// A token created with `AttachToken` performs constraints aggregation with
4838 /// all constraints currently in effect on the buffer collection, plus the
4839 /// attached token under consideration plus child tokens under the attached
4840 /// token which are not themselves an attached token or under such a token.
4841 /// Further subtrees under this subtree are considered for logical
4842 /// allocation only after this subtree has completed logical allocation.
4843 ///
4844 /// Assignment of existing buffers to participants'
4845 /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
4846 /// etc is first-come first-served, but a child can't logically allocate
4847 /// before all its parents have sent `SetConstraints`.
4848 ///
4849 /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
4850 /// in contrast to `AttachToken`, has the created token `Node` + child
4851 /// `Node`(s) (in the created subtree but not in any subtree under this
4852 /// subtree) participate in constraints aggregation along with its parent
4853 /// during the parent's allocation or logical allocation.
4854 ///
4855 /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
4856 /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
4857 /// sysmem before the new token can be passed to `BindSharedCollection`. The
4858 /// `Sync` of the new token can be accomplished with
4859 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
4860 /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
4861 /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
4862 /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
4863 /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
4864 /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
4865 /// created token, to also sync those additional tokens to sysmem using a
4866 /// single round-trip.
4867 ///
4868 /// All table fields are currently required.
4869 ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
4871 /// rights of the subtree. These values for `rights_attenuation_mask`
4872 /// result in no attenuation (note that 0 is not on this list):
4873 /// + ZX_RIGHT_SAME_RIGHTS (preferred)
4874 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
4875 /// + request `token_request` The server end of the `BufferCollectionToken`
4876 /// channel. The client retains the client end.
4877 AttachToken {
4878 payload: BufferCollectionAttachTokenRequest,
4879 control_handle: BufferCollectionControlHandle,
4880 },
4881 /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
4882 /// buffers have been allocated and only the specified number of buffers (or
4883 /// fewer) remain in the buffer collection.
4884 ///
4885 /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
4886 /// client to wait until an old buffer collection is fully or mostly
4887 /// deallocated before attempting allocation of a new buffer collection. The
4888 /// eventpair is only signalled when the buffers of this collection have
4889 /// been fully deallocated (not just un-referenced by clients, but all the
4890 /// memory consumed by those buffers has been fully reclaimed/recycled), or
4891 /// when allocation or logical allocation fails for the tree or subtree
4892 /// including this [`fuchsia.sysmem2/BufferCollection`].
4893 ///
4894 /// The eventpair won't be signalled until allocation or logical allocation
4895 /// has completed; until then, the collection's current buffer count is
4896 /// ignored.
4897 ///
4898 /// If logical allocation fails for an attached subtree (using
4899 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
4900 /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
4902 /// for logical allocation consistency with normal allocation.
4903 ///
4904 /// The lifetime signalled by this event includes asynchronous cleanup of
4905 /// allocated buffers, and this asynchronous cleanup cannot occur until all
4906 /// holders of VMO handles to the buffers have closed those VMO handles.
4907 /// Therefore, clients should take care not to become blocked forever
4908 /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
4909 /// participants using the logical buffer collection (including the waiter
4910 /// itself) are less trusted, less reliable, or potentially blocked by the
4911 /// wait itself. Waiting asynchronously is recommended. Setting a deadline
4912 /// for the client wait may be prudent, depending on details of how the
4913 /// collection and/or its VMOs are used or shared. Failure to allocate a
4914 /// new/replacement buffer collection is better than getting stuck forever.
4915 ///
4916 /// The sysmem server itself intentionally does not perform any waiting on
4917 /// already-failed collections' VMOs to finish cleaning up before attempting
4918 /// a new allocation, and the sysmem server intentionally doesn't retry
4919 /// allocation if a new allocation fails due to out of memory, even if that
4920 /// failure is potentially due to continued existence of an old collection's
4921 /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
4922 /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
4923 /// as long as the waiting client is careful to not create a deadlock.
4924 ///
4925 /// Continued existence of old collections that are still cleaning up is not
4926 /// the only reason that a new allocation may fail due to insufficient
4927 /// memory, even if the new allocation is allocating physically contiguous
4928 /// buffers. Overall system memory pressure can also be the cause of failure
4929 /// to allocate a new collection. See also
4930 /// [`fuchsia.memorypressure/Provider`].
4931 ///
4932 /// `AttachLifetimeTracking` is meant to be compatible with other protocols
4933 /// with a similar `AttachLifetimeTracking` message; duplicates of the same
4934 /// `eventpair` handle (server end) can be sent via more than one
4935 /// `AttachLifetimeTracking` message to different protocols, and the
4936 /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
4937 /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
4939 /// client end can (also) be duplicated without preventing the
4940 /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
4941 ///
4942 /// The server intentionally doesn't "trust" any signals set on the
4943 /// `server_end`. This mechanism intentionally uses only
4944 /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
4945 /// "early", and is only set when all handles to the server end eventpair
4946 /// are closed. No meaning is associated with any of the other signals, and
4947 /// clients should ignore any other signal bits on either end of the
4948 /// `eventpair`.
4949 ///
4950 /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
4951 /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
4952 /// transfer without causing `BufferCollection` channel failure).
4953 ///
4954 /// All table fields are currently required.
4955 ///
4956 /// + request `server_end` This eventpair handle will be closed by the
4957 /// sysmem server when buffers have been allocated initially and the
4958 /// number of buffers is then less than or equal to `buffers_remaining`.
4959 /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
4960 /// fewer) buffers to be fully deallocated. A number greater than zero can
4961 /// be useful in situations where a known number of buffers are
4962 /// intentionally not closed so that the data can continue to be used,
4963 /// such as for keeping the last available video frame displayed in the UI
4964 /// even if the video stream was using protected output buffers. It's
4965 /// outside the scope of the `BufferCollection` interface (at least for
4966 /// now) to determine how many buffers may be held without closing, but
4967 /// it'll typically be in the range 0-2.
4968 AttachLifetimeTracking {
4969 payload: BufferCollectionAttachLifetimeTrackingRequest,
4970 control_handle: BufferCollectionControlHandle,
4971 },
4972 /// An interaction was received which does not match any known method.
4973 #[non_exhaustive]
4974 _UnknownMethod {
4975 /// Ordinal of the method that was called.
4976 ordinal: u64,
4977 control_handle: BufferCollectionControlHandle,
4978 method_type: fidl::MethodType,
4979 },
4980}
4981
4982impl BufferCollectionRequest {
4983 #[allow(irrefutable_let_patterns)]
4984 pub fn into_sync(self) -> Option<(BufferCollectionSyncResponder)> {
4985 if let BufferCollectionRequest::Sync { responder } = self {
4986 Some((responder))
4987 } else {
4988 None
4989 }
4990 }
4991
4992 #[allow(irrefutable_let_patterns)]
4993 pub fn into_release(self) -> Option<(BufferCollectionControlHandle)> {
4994 if let BufferCollectionRequest::Release { control_handle } = self {
4995 Some((control_handle))
4996 } else {
4997 None
4998 }
4999 }
5000
5001 #[allow(irrefutable_let_patterns)]
5002 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionControlHandle)> {
5003 if let BufferCollectionRequest::SetName { payload, control_handle } = self {
5004 Some((payload, control_handle))
5005 } else {
5006 None
5007 }
5008 }
5009
5010 #[allow(irrefutable_let_patterns)]
5011 pub fn into_set_debug_client_info(
5012 self,
5013 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionControlHandle)> {
5014 if let BufferCollectionRequest::SetDebugClientInfo { payload, control_handle } = self {
5015 Some((payload, control_handle))
5016 } else {
5017 None
5018 }
5019 }
5020
5021 #[allow(irrefutable_let_patterns)]
5022 pub fn into_set_debug_timeout_log_deadline(
5023 self,
5024 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionControlHandle)> {
5025 if let BufferCollectionRequest::SetDebugTimeoutLogDeadline { payload, control_handle } =
5026 self
5027 {
5028 Some((payload, control_handle))
5029 } else {
5030 None
5031 }
5032 }
5033
5034 #[allow(irrefutable_let_patterns)]
5035 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionControlHandle)> {
5036 if let BufferCollectionRequest::SetVerboseLogging { control_handle } = self {
5037 Some((control_handle))
5038 } else {
5039 None
5040 }
5041 }
5042
5043 #[allow(irrefutable_let_patterns)]
5044 pub fn into_get_node_ref(self) -> Option<(BufferCollectionGetNodeRefResponder)> {
5045 if let BufferCollectionRequest::GetNodeRef { responder } = self {
5046 Some((responder))
5047 } else {
5048 None
5049 }
5050 }
5051
5052 #[allow(irrefutable_let_patterns)]
5053 pub fn into_is_alternate_for(
5054 self,
5055 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionIsAlternateForResponder)> {
5056 if let BufferCollectionRequest::IsAlternateFor { payload, responder } = self {
5057 Some((payload, responder))
5058 } else {
5059 None
5060 }
5061 }
5062
5063 #[allow(irrefutable_let_patterns)]
5064 pub fn into_get_buffer_collection_id(
5065 self,
5066 ) -> Option<(BufferCollectionGetBufferCollectionIdResponder)> {
5067 if let BufferCollectionRequest::GetBufferCollectionId { responder } = self {
5068 Some((responder))
5069 } else {
5070 None
5071 }
5072 }
5073
5074 #[allow(irrefutable_let_patterns)]
5075 pub fn into_set_weak(self) -> Option<(BufferCollectionControlHandle)> {
5076 if let BufferCollectionRequest::SetWeak { control_handle } = self {
5077 Some((control_handle))
5078 } else {
5079 None
5080 }
5081 }
5082
5083 #[allow(irrefutable_let_patterns)]
5084 pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, BufferCollectionControlHandle)> {
5085 if let BufferCollectionRequest::SetWeakOk { payload, control_handle } = self {
5086 Some((payload, control_handle))
5087 } else {
5088 None
5089 }
5090 }
5091
5092 #[allow(irrefutable_let_patterns)]
5093 pub fn into_attach_node_tracking(
5094 self,
5095 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionControlHandle)> {
5096 if let BufferCollectionRequest::AttachNodeTracking { payload, control_handle } = self {
5097 Some((payload, control_handle))
5098 } else {
5099 None
5100 }
5101 }
5102
5103 #[allow(irrefutable_let_patterns)]
5104 pub fn into_set_constraints(
5105 self,
5106 ) -> Option<(BufferCollectionSetConstraintsRequest, BufferCollectionControlHandle)> {
5107 if let BufferCollectionRequest::SetConstraints { payload, control_handle } = self {
5108 Some((payload, control_handle))
5109 } else {
5110 None
5111 }
5112 }
5113
5114 #[allow(irrefutable_let_patterns)]
5115 pub fn into_wait_for_all_buffers_allocated(
5116 self,
5117 ) -> Option<(BufferCollectionWaitForAllBuffersAllocatedResponder)> {
5118 if let BufferCollectionRequest::WaitForAllBuffersAllocated { responder } = self {
5119 Some((responder))
5120 } else {
5121 None
5122 }
5123 }
5124
5125 #[allow(irrefutable_let_patterns)]
5126 pub fn into_check_all_buffers_allocated(
5127 self,
5128 ) -> Option<(BufferCollectionCheckAllBuffersAllocatedResponder)> {
5129 if let BufferCollectionRequest::CheckAllBuffersAllocated { responder } = self {
5130 Some((responder))
5131 } else {
5132 None
5133 }
5134 }
5135
5136 #[allow(irrefutable_let_patterns)]
5137 pub fn into_attach_token(
5138 self,
5139 ) -> Option<(BufferCollectionAttachTokenRequest, BufferCollectionControlHandle)> {
5140 if let BufferCollectionRequest::AttachToken { payload, control_handle } = self {
5141 Some((payload, control_handle))
5142 } else {
5143 None
5144 }
5145 }
5146
5147 #[allow(irrefutable_let_patterns)]
5148 pub fn into_attach_lifetime_tracking(
5149 self,
5150 ) -> Option<(BufferCollectionAttachLifetimeTrackingRequest, BufferCollectionControlHandle)>
5151 {
5152 if let BufferCollectionRequest::AttachLifetimeTracking { payload, control_handle } = self {
5153 Some((payload, control_handle))
5154 } else {
5155 None
5156 }
5157 }
5158
5159 /// Name of the method defined in FIDL
5160 pub fn method_name(&self) -> &'static str {
5161 match *self {
5162 BufferCollectionRequest::Sync { .. } => "sync",
5163 BufferCollectionRequest::Release { .. } => "release",
5164 BufferCollectionRequest::SetName { .. } => "set_name",
5165 BufferCollectionRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
5166 BufferCollectionRequest::SetDebugTimeoutLogDeadline { .. } => {
5167 "set_debug_timeout_log_deadline"
5168 }
5169 BufferCollectionRequest::SetVerboseLogging { .. } => "set_verbose_logging",
5170 BufferCollectionRequest::GetNodeRef { .. } => "get_node_ref",
5171 BufferCollectionRequest::IsAlternateFor { .. } => "is_alternate_for",
5172 BufferCollectionRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
5173 BufferCollectionRequest::SetWeak { .. } => "set_weak",
5174 BufferCollectionRequest::SetWeakOk { .. } => "set_weak_ok",
5175 BufferCollectionRequest::AttachNodeTracking { .. } => "attach_node_tracking",
5176 BufferCollectionRequest::SetConstraints { .. } => "set_constraints",
5177 BufferCollectionRequest::WaitForAllBuffersAllocated { .. } => {
5178 "wait_for_all_buffers_allocated"
5179 }
5180 BufferCollectionRequest::CheckAllBuffersAllocated { .. } => {
5181 "check_all_buffers_allocated"
5182 }
5183 BufferCollectionRequest::AttachToken { .. } => "attach_token",
5184 BufferCollectionRequest::AttachLifetimeTracking { .. } => "attach_lifetime_tracking",
5185 BufferCollectionRequest::_UnknownMethod {
5186 method_type: fidl::MethodType::OneWay,
5187 ..
5188 } => "unknown one-way method",
5189 BufferCollectionRequest::_UnknownMethod {
5190 method_type: fidl::MethodType::TwoWay,
5191 ..
5192 } => "unknown two-way method",
5193 }
5194 }
5195}
5196
/// Server-side handle for the `BufferCollection` channel, usable to shut the
/// channel down (optionally with an epitaph) independent of any in-flight
/// request; see the [`fidl::endpoints::ControlHandle`] impl below.
#[derive(Debug, Clone)]
pub struct BufferCollectionControlHandle {
    // Shared serving state for the channel; `Arc` so clones of this handle
    // all refer to the same underlying channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
5201
impl fidl::endpoints::ControlHandle for BufferCollectionControlHandle {
    // Shuts down the serving channel; delegates to the shared `ServeInner`.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Shuts down the channel after sending `status` as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // Reports whether the underlying channel is already closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Returns a handle for awaiting closure of the underlying channel.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raises/clears signal bits on the peer end of the channel.
    // Only available when compiling for a Fuchsia target.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
5227
// Intentionally empty inherent impl: fidlgen emits protocol-specific methods
// (e.g. event senders) here when the protocol declares any — presumably none
// apply to `BufferCollection`; confirm against the FIDL definition.
impl BufferCollectionControlHandle {}
5229
/// Responder for the `BufferCollection.Sync` two-way method; dropping it
/// without responding shuts down the channel (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionSyncResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5236
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5247
impl fidl::endpoints::Responder for BufferCollectionSyncResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrows the control handle for the channel this response belongs to.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Consumes the responder without shutting down the channel, bypassing the
    // shutdown-on-drop behavior of the `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5262
5263impl BufferCollectionSyncResponder {
5264 /// Sends a response to the FIDL transaction.
5265 ///
5266 /// Sets the channel to shutdown if an error occurs.
5267 pub fn send(self) -> Result<(), fidl::Error> {
5268 let _result = self.send_raw();
5269 if _result.is_err() {
5270 self.control_handle.shutdown();
5271 }
5272 self.drop_without_shutdown();
5273 _result
5274 }
5275
5276 /// Similar to "send" but does not shutdown the channel if an error occurs.
5277 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
5278 let _result = self.send_raw();
5279 self.drop_without_shutdown();
5280 _result
5281 }
5282
5283 fn send_raw(&self) -> Result<(), fidl::Error> {
5284 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
5285 fidl::encoding::Flexible::new(()),
5286 self.tx_id,
5287 0x11ac2555cf575b54,
5288 fidl::encoding::DynamicFlags::FLEXIBLE,
5289 )
5290 }
5291}
5292
/// Responder for the `BufferCollection.GetNodeRef` two-way method; dropping it
/// without responding shuts down the channel (see the `Drop` impl below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetNodeRefResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5299
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5310
impl fidl::endpoints::Responder for BufferCollectionGetNodeRefResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrows the control handle for the channel this response belongs to.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Consumes the responder without shutting down the channel, bypassing the
    // shutdown-on-drop behavior of the `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5325
5326impl BufferCollectionGetNodeRefResponder {
5327 /// Sends a response to the FIDL transaction.
5328 ///
5329 /// Sets the channel to shutdown if an error occurs.
5330 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5331 let _result = self.send_raw(payload);
5332 if _result.is_err() {
5333 self.control_handle.shutdown();
5334 }
5335 self.drop_without_shutdown();
5336 _result
5337 }
5338
5339 /// Similar to "send" but does not shutdown the channel if an error occurs.
5340 pub fn send_no_shutdown_on_err(
5341 self,
5342 mut payload: NodeGetNodeRefResponse,
5343 ) -> Result<(), fidl::Error> {
5344 let _result = self.send_raw(payload);
5345 self.drop_without_shutdown();
5346 _result
5347 }
5348
5349 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5350 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
5351 fidl::encoding::Flexible::new(&mut payload),
5352 self.tx_id,
5353 0x5b3d0e51614df053,
5354 fidl::encoding::DynamicFlags::FLEXIBLE,
5355 )
5356 }
5357}
5358
/// Responder for the `BufferCollection.IsAlternateFor` two-way method;
/// dropping it without responding shuts down the channel (see the `Drop` impl
/// below).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionIsAlternateForResponder {
    // `ManuallyDrop` lets `drop_without_shutdown` release the handle without
    // running this type's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5365
/// Sets the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5376
impl fidl::endpoints::Responder for BufferCollectionIsAlternateForResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Dispose of the responder without triggering the channel shutdown that
    // the Drop impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5391
5392impl BufferCollectionIsAlternateForResponder {
5393 /// Sends a response to the FIDL transaction.
5394 ///
5395 /// Sets the channel to shutdown if an error occurs.
5396 pub fn send(
5397 self,
5398 mut result: Result<&NodeIsAlternateForResponse, Error>,
5399 ) -> Result<(), fidl::Error> {
5400 let _result = self.send_raw(result);
5401 if _result.is_err() {
5402 self.control_handle.shutdown();
5403 }
5404 self.drop_without_shutdown();
5405 _result
5406 }
5407
5408 /// Similar to "send" but does not shutdown the channel if an error occurs.
5409 pub fn send_no_shutdown_on_err(
5410 self,
5411 mut result: Result<&NodeIsAlternateForResponse, Error>,
5412 ) -> Result<(), fidl::Error> {
5413 let _result = self.send_raw(result);
5414 self.drop_without_shutdown();
5415 _result
5416 }
5417
5418 fn send_raw(
5419 &self,
5420 mut result: Result<&NodeIsAlternateForResponse, Error>,
5421 ) -> Result<(), fidl::Error> {
5422 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5423 NodeIsAlternateForResponse,
5424 Error,
5425 >>(
5426 fidl::encoding::FlexibleResult::new(result),
5427 self.tx_id,
5428 0x3a58e00157e0825,
5429 fidl::encoding::DynamicFlags::FLEXIBLE,
5430 )
5431 }
5432}
5433
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so the custom Drop impl (below in this file)
    // can decide whether to shut the channel down before dropping the handle
    // exactly once.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder must answer.
    tx_id: u32,
}
5440
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5442/// if the responder is dropped without sending a response, so that the client
5443/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        // Dropped without a reply having been sent: shut the channel down so
        // the client's pending call fails fast instead of hanging.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5451
impl fidl::endpoints::Responder for BufferCollectionGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Dispose of the responder without triggering the channel shutdown that
    // the Drop impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5466
5467impl BufferCollectionGetBufferCollectionIdResponder {
5468 /// Sends a response to the FIDL transaction.
5469 ///
5470 /// Sets the channel to shutdown if an error occurs.
5471 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5472 let _result = self.send_raw(payload);
5473 if _result.is_err() {
5474 self.control_handle.shutdown();
5475 }
5476 self.drop_without_shutdown();
5477 _result
5478 }
5479
5480 /// Similar to "send" but does not shutdown the channel if an error occurs.
5481 pub fn send_no_shutdown_on_err(
5482 self,
5483 mut payload: &NodeGetBufferCollectionIdResponse,
5484 ) -> Result<(), fidl::Error> {
5485 let _result = self.send_raw(payload);
5486 self.drop_without_shutdown();
5487 _result
5488 }
5489
5490 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5491 self.control_handle
5492 .inner
5493 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
5494 fidl::encoding::Flexible::new(payload),
5495 self.tx_id,
5496 0x77d19a494b78ba8c,
5497 fidl::encoding::DynamicFlags::FLEXIBLE,
5498 )
5499 }
5500}
5501
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponder {
    // Wrapped in ManuallyDrop so the custom Drop impl (below in this file)
    // can decide whether to shut the channel down before dropping the handle
    // exactly once.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder must answer.
    tx_id: u32,
}
5508
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5510/// if the responder is dropped without sending a response, so that the client
5511/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionWaitForAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        // Dropped without a reply having been sent: shut the channel down so
        // the client's pending call fails fast instead of hanging.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5519
impl fidl::endpoints::Responder for BufferCollectionWaitForAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Dispose of the responder without triggering the channel shutdown that
    // the Drop impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5534
5535impl BufferCollectionWaitForAllBuffersAllocatedResponder {
5536 /// Sends a response to the FIDL transaction.
5537 ///
5538 /// Sets the channel to shutdown if an error occurs.
5539 pub fn send(
5540 self,
5541 mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5542 ) -> Result<(), fidl::Error> {
5543 let _result = self.send_raw(result);
5544 if _result.is_err() {
5545 self.control_handle.shutdown();
5546 }
5547 self.drop_without_shutdown();
5548 _result
5549 }
5550
5551 /// Similar to "send" but does not shutdown the channel if an error occurs.
5552 pub fn send_no_shutdown_on_err(
5553 self,
5554 mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5555 ) -> Result<(), fidl::Error> {
5556 let _result = self.send_raw(result);
5557 self.drop_without_shutdown();
5558 _result
5559 }
5560
5561 fn send_raw(
5562 &self,
5563 mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5564 ) -> Result<(), fidl::Error> {
5565 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5566 BufferCollectionWaitForAllBuffersAllocatedResponse,
5567 Error,
5568 >>(
5569 fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
5570 self.tx_id,
5571 0x62300344b61404e,
5572 fidl::encoding::DynamicFlags::FLEXIBLE,
5573 )
5574 }
5575}
5576
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionCheckAllBuffersAllocatedResponder {
    // Wrapped in ManuallyDrop so the custom Drop impl (below in this file)
    // can decide whether to shut the channel down before dropping the handle
    // exactly once.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder must answer.
    tx_id: u32,
}
5583
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5585/// if the responder is dropped without sending a response, so that the client
5586/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionCheckAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        // Dropped without a reply having been sent: shut the channel down so
        // the client's pending call fails fast instead of hanging.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5594
impl fidl::endpoints::Responder for BufferCollectionCheckAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Dispose of the responder without triggering the channel shutdown that
    // the Drop impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5609
5610impl BufferCollectionCheckAllBuffersAllocatedResponder {
5611 /// Sends a response to the FIDL transaction.
5612 ///
5613 /// Sets the channel to shutdown if an error occurs.
5614 pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5615 let _result = self.send_raw(result);
5616 if _result.is_err() {
5617 self.control_handle.shutdown();
5618 }
5619 self.drop_without_shutdown();
5620 _result
5621 }
5622
5623 /// Similar to "send" but does not shutdown the channel if an error occurs.
5624 pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5625 let _result = self.send_raw(result);
5626 self.drop_without_shutdown();
5627 _result
5628 }
5629
5630 fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5631 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5632 fidl::encoding::EmptyStruct,
5633 Error,
5634 >>(
5635 fidl::encoding::FlexibleResult::new(result),
5636 self.tx_id,
5637 0x35a5fe77ce939c10,
5638 fidl::encoding::DynamicFlags::FLEXIBLE,
5639 )
5640 }
5641}
5642
/// Zero-sized protocol marker for `BufferCollectionToken`; only selects the
/// proxy/stream types via its `fidl::endpoints::ProtocolMarker` impl below.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenMarker;
5645
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenMarker {
    type Proxy = BufferCollectionTokenProxy;
    type RequestStream = BufferCollectionTokenRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenSynchronousProxy;

    // NOTE(review): "(anonymous)" suggests the protocol is not @discoverable
    // in the FIDL source — confirm there before relying on this name.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionToken";
}
5654
/// Client-side interface for `BufferCollectionToken`, implemented by both the
/// async proxy and test fakes. One-way methods return `Result` directly;
/// two-way methods return an associated `…ResponseFut` future type.
pub trait BufferCollectionTokenProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    // Methods below are specific to BufferCollectionToken (the ones above are
    // shared across the Node family of protocols in this file).
    type DuplicateSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error>,
        > + Send;
    fn r#duplicate_sync(
        &self,
        payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut;
    fn r#duplicate(
        &self,
        payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_dispensable(&self) -> Result<(), fidl::Error>;
    fn r#create_buffer_collection_token_group(
        &self,
        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error>;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenSynchronousProxy {
    // Blocking (synchronous) FIDL client wrapping the underlying channel.
    client: fidl::client::sync::Client,
}
5709
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenSynchronousProxy {
    type Proxy = BufferCollectionTokenProxy;
    type Protocol = BufferCollectionTokenMarker;

    // Delegates to the inherent constructor below.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Consume the proxy and return the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    // Borrow the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
5727
5728#[cfg(target_os = "fuchsia")]
5729impl BufferCollectionTokenSynchronousProxy {
5730 pub fn new(channel: fidl::Channel) -> Self {
5731 let protocol_name =
5732 <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
5733 Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
5734 }
5735
    /// Consumes the proxy and returns the underlying Zircon channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
5739
5740 /// Waits until an event arrives and returns it. It is safe for other
5741 /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
        // Block until a message arrives (or `deadline` passes), then decode
        // it as a BufferCollectionToken event.
        BufferCollectionTokenEvent::decode(self.client.wait_for_event(deadline)?)
    }
5748
5749 /// Ensure that previous messages have been received server side. This is
5750 /// particularly useful after previous messages that created new tokens,
5751 /// because a token must be known to the sysmem server before sending the
5752 /// token to another participant.
5753 ///
5754 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
5755 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
5756 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
5757 /// to mitigate the possibility of a hostile/fake
5758 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
5759 /// Another way is to pass the token to
5760 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
5761 /// the token as part of exchanging it for a
5762 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
5763 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
5764 /// of stalling.
5765 ///
5766 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
5767 /// and then starting and completing a `Sync`, it's then safe to send the
5768 /// `BufferCollectionToken` client ends to other participants knowing the
5769 /// server will recognize the tokens when they're sent by the other
5770 /// participants to sysmem in a
5771 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
5772 /// efficient way to create tokens while avoiding unnecessary round trips.
5773 ///
5774 /// Other options include waiting for each
5775 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
5776 /// individually (using separate call to `Sync` after each), or calling
5777 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
5778 /// converted to a `BufferCollection` via
5779 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
5780 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
5781 /// the sync step and can create multiple tokens at once.
5782 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
5783 let _response = self.client.send_query::<
5784 fidl::encoding::EmptyPayload,
5785 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
5786 >(
5787 (),
5788 0x11ac2555cf575b54,
5789 fidl::encoding::DynamicFlags::FLEXIBLE,
5790 ___deadline,
5791 )?
5792 .into_result::<BufferCollectionTokenMarker>("sync")?;
5793 Ok(_response)
5794 }
5795
5796 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
5797 ///
5798 /// Normally a participant will convert a `BufferCollectionToken` into a
5799 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
5800 /// `Release` via the token (and then close the channel immediately or
5801 /// shortly later in response to server closing the server end), which
5802 /// avoids causing buffer collection failure. Without a prior `Release`,
5803 /// closing the `BufferCollectionToken` client end will cause buffer
5804 /// collection failure.
5805 ///
5806 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
5807 ///
5808 /// By default the server handles unexpected closure of a
5809 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
5810 /// first) by failing the buffer collection. Partly this is to expedite
5811 /// closing VMO handles to reclaim memory when any participant fails. If a
5812 /// participant would like to cleanly close a `BufferCollection` without
5813 /// causing buffer collection failure, the participant can send `Release`
5814 /// before closing the `BufferCollection` client end. The `Release` can
5815 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
5816 /// buffer collection won't require constraints from this node in order to
5817 /// allocate. If after `SetConstraints`, the constraints are retained and
5818 /// aggregated, despite the lack of `BufferCollection` connection at the
5819 /// time of constraints aggregation.
5820 ///
5821 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
5822 ///
5823 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
5824 /// end (without `Release` first) will trigger failure of the buffer
5825 /// collection. To close a `BufferCollectionTokenGroup` channel without
5826 /// failing the buffer collection, ensure that AllChildrenPresent() has been
5827 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
5828 /// client end.
5829 ///
5830 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
5832 /// buffer collection will fail (triggered by reception of `Release` without
5833 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
5834 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
5835 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
5836 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
5837 /// close requires `AllChildrenPresent` (if not already sent), then
5838 /// `Release`, then close client end.
5839 ///
5840 /// If `Release` occurs after `AllChildrenPresent`, the children and all
5841 /// their constraints remain intact (just as they would if the
5842 /// `BufferCollectionTokenGroup` channel had remained open), and the client
5843 /// end close doesn't trigger buffer collection failure.
5844 ///
5845 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
5846 ///
5847 /// For brevity, the per-channel-protocol paragraphs above ignore the
5848 /// separate failure domain created by
5849 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
5850 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
5851 /// unexpectedly closes (without `Release` first) and that client end is
5852 /// under a failure domain, instead of failing the whole buffer collection,
5853 /// the failure domain is failed, but the buffer collection itself is
5854 /// isolated from failure of the failure domain. Such failure domains can be
5855 /// nested, in which case only the inner-most failure domain in which the
5856 /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) call: `send` writes the request without
        // waiting for a reply, so any returned error is transport-level.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5864
5865 /// Set a name for VMOs in this buffer collection.
5866 ///
5867 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
5868 /// will be truncated to fit. The name of the vmo will be suffixed with the
5869 /// buffer index within the collection (if the suffix fits within
5870 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
5871 /// listed in the inspect data.
5872 ///
5873 /// The name only affects VMOs allocated after the name is set; this call
5874 /// does not rename existing VMOs. If multiple clients set different names
5875 /// then the larger priority value will win. Setting a new name with the
5876 /// same priority as a prior name doesn't change the name.
5877 ///
5878 /// All table fields are currently required.
5879 ///
5880 /// + request `priority` The name is only set if this is the first `SetName`
5881 /// or if `priority` is greater than any previous `priority` value in
5882 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
5883 /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way call: no reply is awaited; errors are transport-level only.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5891
5892 /// Set information about the current client that can be used by sysmem to
5893 /// help diagnose leaking memory and allocation stalls waiting for a
5894 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
5895 ///
5896 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
5898 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
5899 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
5900 ///
5901 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
5902 /// `Allocator` is the most efficient way to ensure that all
5903 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
5904 /// set, and is also more efficient than separately sending the same debug
5905 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
5906 /// created [`fuchsia.sysmem2/Node`].
5907 ///
5908 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
5909 /// indicate which client is closing their channel first, leading to subtree
5910 /// failure (which can be normal if the purpose of the subtree is over, but
5911 /// if happening earlier than expected, the client-channel-specific name can
5912 /// help diagnose where the failure is first coming from, from sysmem's
5913 /// point of view).
5914 ///
5915 /// All table fields are currently required.
5916 ///
5917 /// + request `name` This can be an arbitrary string, but the current
5918 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
5919 /// + request `id` This can be an arbitrary id, but the current process ID
5920 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: no reply is awaited; errors are transport-level only.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5931
5932 /// Sysmem logs a warning if sysmem hasn't seen
5933 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
5934 /// within 5 seconds after creation of a new collection.
5935 ///
5936 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
5938 /// take effect.
5939 ///
5940 /// In most cases the default works well.
5941 ///
5942 /// All table fields are currently required.
5943 ///
5944 /// + request `deadline` The time at which sysmem will start trying to log
5945 /// the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way call: no reply is awaited; errors are transport-level only.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5956
5957 /// This enables verbose logging for the buffer collection.
5958 ///
5959 /// Verbose logging includes constraints set via
5960 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
5961 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
5962 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
5963 /// the tree of `Node`(s).
5964 ///
5965 /// Normally sysmem prints only a single line complaint when aggregation
5966 /// fails, with just the specific detailed reason that aggregation failed,
5967 /// with little surrounding context. While this is often enough to diagnose
5968 /// a problem if only a small change was made and everything was working
5969 /// before the small change, it's often not particularly helpful for getting
5970 /// a new buffer collection to work for the first time. Especially with
5971 /// more complex trees of nodes, involving things like
5972 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
5973 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
5974 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
5975 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
5976 /// looks like and why it's failing a logical allocation, or why a tree or
5977 /// subtree is failing sooner than expected.
5978 ///
5979 /// The intent of the extra logging is to be acceptable from a performance
5980 /// point of view, under the assumption that verbose logging is only enabled
5981 /// on a low number of buffer collections. If we're not tracking down a bug,
5982 /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way call with an empty payload; errors are transport-level only.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5990
5991 /// This gets a handle that can be used as a parameter to
5992 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
5993 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
5994 /// client obtained this handle from this `Node`.
5995 ///
5996 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
5997 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
5998 /// despite the two calls typically being on different channels.
5999 ///
6000 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6001 ///
6002 /// All table fields are currently required.
6003 ///
6004 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6005 /// different `Node` channel, to prove that the client obtained the handle
6006 /// from this `Node`.
6007 pub fn r#get_node_ref(
6008 &self,
6009 ___deadline: zx::MonotonicInstant,
6010 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
6011 let _response = self.client.send_query::<
6012 fidl::encoding::EmptyPayload,
6013 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
6014 >(
6015 (),
6016 0x5b3d0e51614df053,
6017 fidl::encoding::DynamicFlags::FLEXIBLE,
6018 ___deadline,
6019 )?
6020 .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
6021 Ok(_response)
6022 }
6023
6024 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6025 /// rooted at a different child token of a common parent
6026 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6027 /// passed-in `node_ref`.
6028 ///
6029 /// This call is for assisting with admission control de-duplication, and
6030 /// with debugging.
6031 ///
6032 /// The `node_ref` must be obtained using
6033 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6034 ///
6035 /// The `node_ref` can be a duplicated handle; it's not necessary to call
6036 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6037 ///
6038 /// If a calling token may not actually be a valid token at all due to a
6039 /// potentially hostile/untrusted provider of the token, call
6040 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6041 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6042 /// never responds due to a calling token not being a real token (not really
6043 /// talking to sysmem). Another option is to call
6044 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6045 /// which also validates the token along with converting it to a
6046 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6047 ///
6048 /// All table fields are currently required.
6049 ///
6050 /// - response `is_alternate`
6051 /// - true: The first parent node in common between the calling node and
6052 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
6053 /// that the calling `Node` and the `node_ref` `Node` will not have both
6054 /// their constraints apply - rather sysmem will choose one or the other
6055 /// of the constraints - never both. This is because only one child of
6056 /// a `BufferCollectionTokenGroup` is selected during logical
6057 /// allocation, with only that one child's subtree contributing to
6058 /// constraints aggregation.
6059 /// - false: The first parent node in common between the calling `Node`
6060 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6061 /// Currently, this means the first parent node in common is a
6062 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
6063 /// `Release`ed). This means that the calling `Node` and the `node_ref`
6064 /// `Node` may have both their constraints apply during constraints
6065 /// aggregation of the logical allocation, if both `Node`(s) are
6066 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6067 /// this case, there is no `BufferCollectionTokenGroup` that will
6068 /// directly prevent the two `Node`(s) from both being selected and
6069 /// their constraints both aggregated, but even when false, one or both
6070 /// `Node`(s) may still be eliminated from consideration if one or both
6071 /// `Node`(s) has a direct or indirect parent
6072 /// `BufferCollectionTokenGroup` which selects a child subtree other
6073 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
6074 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6075 /// associated with the same buffer collection as the calling `Node`.
6076 /// Another reason for this error is if the `node_ref` is an
6077 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6078 /// a real `node_ref` obtained from `GetNodeRef`.
6079 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
6081 /// the needed rights expected on a real `node_ref`.
6082 /// * No other failing status codes are returned by this call. However,
6083 /// sysmem may add additional codes in future, so the client should have
6084 /// sensible default handling for any failing status code.
6085 pub fn r#is_alternate_for(
6086 &self,
6087 mut payload: NodeIsAlternateForRequest,
6088 ___deadline: zx::MonotonicInstant,
6089 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
6090 let _response = self.client.send_query::<
6091 NodeIsAlternateForRequest,
6092 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
6093 >(
6094 &mut payload,
6095 0x3a58e00157e0825,
6096 fidl::encoding::DynamicFlags::FLEXIBLE,
6097 ___deadline,
6098 )?
6099 .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
6100 Ok(_response.map(|x| x))
6101 }
6102
6103 /// Get the buffer collection ID. This ID is also available from
6104 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6105 /// within the collection).
6106 ///
6107 /// This call is mainly useful in situations where we can't convey a
6108 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6109 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6110 /// handle, which can be joined back up with a `BufferCollection` client end
6111 /// that was created via a different path. Prefer to convey a
6112 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6113 ///
6114 /// Trusting a `buffer_collection_id` value from a source other than sysmem
6115 /// is analogous to trusting a koid value from a source other than zircon.
6116 /// Both should be avoided unless really necessary, and both require
6117 /// caution. In some situations it may be reasonable to refer to a
6118 /// pre-established `BufferCollection` by `buffer_collection_id` via a
6119 /// protocol for efficiency reasons, but an incoming value purporting to be
6120 /// a `buffer_collection_id` is not sufficient alone to justify granting the
6121 /// sender of the `buffer_collection_id` any capability. The sender must
6122 /// first prove to a receiver that the sender has/had a VMO or has/had a
6123 /// `BufferCollectionToken` to the same collection by sending a handle that
6124 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6125 /// `buffer_collection_id` value. The receiver should take care to avoid
6126 /// assuming that a sender had a `BufferCollectionToken` in cases where the
6127 /// sender has only proven that the sender had a VMO.
6128 ///
6129 /// - response `buffer_collection_id` This ID is unique per buffer
6130 /// collection per boot. Each buffer is uniquely identified by the
6131 /// `buffer_collection_id` and `buffer_index` together.
6132 pub fn r#get_buffer_collection_id(
6133 &self,
6134 ___deadline: zx::MonotonicInstant,
6135 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
6136 let _response = self.client.send_query::<
6137 fidl::encoding::EmptyPayload,
6138 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
6139 >(
6140 (),
6141 0x77d19a494b78ba8c,
6142 fidl::encoding::DynamicFlags::FLEXIBLE,
6143 ___deadline,
6144 )?
6145 .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
6146 Ok(_response)
6147 }
6148
6149 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6150 /// created after this message to weak, which means that a client's `Node`
6151 /// client end (or a child created after this message) is not alone
6152 /// sufficient to keep allocated VMOs alive.
6153 ///
6154 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6155 /// `close_weak_asap`.
6156 ///
6157 /// This message is only permitted before the `Node` becomes ready for
6158 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6159 /// * `BufferCollectionToken`: any time
6160 /// * `BufferCollection`: before `SetConstraints`
6161 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6162 ///
6163 /// Currently, no conversion from strong `Node` to weak `Node` after ready
6164 /// for allocation is provided, but a client can simulate that by creating
6165 /// an additional `Node` before allocation and setting that additional
6166 /// `Node` to weak, and then potentially at some point later sending
6167 /// `Release` and closing the client end of the client's strong `Node`, but
6168 /// keeping the client's weak `Node`.
6169 ///
6170 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6171 /// collection failure (all `Node` client end(s) will see
6172 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6173 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6174 /// this situation until all `Node`(s) are ready for allocation. For initial
6175 /// allocation to succeed, at least one strong `Node` is required to exist
6176 /// at allocation time, but after that client receives VMO handles, that
6177 /// client can `BufferCollection.Release` and close the client end without
6178 /// causing this type of failure.
6179 ///
6180 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6181 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6182 /// separately as appropriate.
6183 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6184 self.client.send::<fidl::encoding::EmptyPayload>(
6185 (),
6186 0x22dd3ea514eeffe1,
6187 fidl::encoding::DynamicFlags::FLEXIBLE,
6188 )
6189 }
6190
6191 /// This indicates to sysmem that the client is prepared to pay attention to
6192 /// `close_weak_asap`.
6193 ///
6194 /// If sent, this message must be before
6195 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6196 ///
6197 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6198 /// send this message before `WaitForAllBuffersAllocated`, or a parent
6199 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6200 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6201 /// trigger buffer collection failure.
6202 ///
6203 /// This message is necessary because weak sysmem VMOs have not always been
6204 /// a thing, so older clients are not aware of the need to pay attention to
6205 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6206 /// sysmem weak VMO handles asap. By having this message and requiring
6207 /// participants to indicate their acceptance of this aspect of the overall
6208 /// protocol, we avoid situations where an older client is delivered a weak
6209 /// VMO without any way for sysmem to get that VMO to close quickly later
6210 /// (and on a per-buffer basis).
6211 ///
6212 /// A participant that doesn't handle `close_weak_asap` and also doesn't
6213 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6214 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6215 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6216 /// same participant has a child/delegate which does retrieve VMOs, that
6217 /// child/delegate will need to send `SetWeakOk` before
6218 /// `WaitForAllBuffersAllocated`.
6219 ///
6220 /// + request `for_child_nodes_also` If present and true, this means direct
6221 /// child nodes of this node created after this message plus all
6222 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
6223 /// those nodes. Any child node of this node that was created before this
6224 /// message is not included. This setting is "sticky" in the sense that a
6225 /// subsequent `SetWeakOk` without this bool set to true does not reset
6226 /// the server-side bool. If this creates a problem for a participant, a
6227 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6228 /// tokens instead, as appropriate. A participant should only set
6229 /// `for_child_nodes_also` true if the participant can really promise to
6230 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
6231 /// weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
6233 /// which are using sysmem(1) can be weak, despite the clients of those
6234 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6235 /// direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
6237 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
6238 /// token, which will fail allocation unless an ancestor of this `Node`
6239 /// specified `for_child_nodes_also` true.
6240 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6241 self.client.send::<NodeSetWeakOkRequest>(
6242 &mut payload,
6243 0x38a44fc4d7724be9,
6244 fidl::encoding::DynamicFlags::FLEXIBLE,
6245 )
6246 }
6247
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
6250 /// reservation by a different `Node` via
6251 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6252 ///
6253 /// The `Node` buffer counts may not be released until the entire tree of
6254 /// `Node`(s) is closed or failed, because
6255 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6256 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6257 /// `Node` buffer counts remain reserved until the orphaned node is later
6258 /// cleaned up.
6259 ///
6260 /// If the `Node` exceeds a fairly large number of attached eventpair server
6261 /// ends, a log message will indicate this and the `Node` (and the
6262 /// appropriate) sub-tree will fail.
6263 ///
6264 /// The `server_end` will remain open when
6265 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6266 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6267 /// [`fuchsia.sysmem2/BufferCollection`].
6268 ///
6269 /// This message can also be used with a
6270 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6271 pub fn r#attach_node_tracking(
6272 &self,
6273 mut payload: NodeAttachNodeTrackingRequest,
6274 ) -> Result<(), fidl::Error> {
6275 self.client.send::<NodeAttachNodeTrackingRequest>(
6276 &mut payload,
6277 0x3f22f2a293d3cdac,
6278 fidl::encoding::DynamicFlags::FLEXIBLE,
6279 )
6280 }
6281
6282 /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6283 /// one, referring to the same buffer collection.
6284 ///
6285 /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6287 ///
6288 /// This method can be used to add more participants, by transferring the
6289 /// newly created tokens to additional participants.
6290 ///
6291 /// A new token will be returned for each entry in the
6292 /// `rights_attenuation_masks` array.
6293 ///
6294 /// If the called token may not actually be a valid token due to a
6295 /// potentially hostile/untrusted provider of the token, consider using
6296 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6297 /// instead of potentially getting stuck indefinitely if
6298 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
6299 /// due to the calling token not being a real token.
6300 ///
6301 /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
6302 /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
6303 /// method, because the sync step is included in this call, at the cost of a
6304 /// round trip during this call.
6305 ///
6306 /// All tokens must be turned in to sysmem via
6307 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6308 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6309 /// successfully allocate buffers (or to logically allocate buffers in the
6310 /// case of subtrees involving
6311 /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
6312 ///
6313 /// All table fields are currently required.
6314 ///
6315 /// + request `rights_attenuation_mask` In each entry of
6316 /// `rights_attenuation_masks`, rights bits that are zero will be absent
6317 /// in the buffer VMO rights obtainable via the corresponding returned
6318 /// token. This allows an initiator or intermediary participant to
6319 /// attenuate the rights available to a participant. This does not allow a
6320 /// participant to gain rights that the participant doesn't already have.
6321 /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
6322 /// attenuation should be applied.
6323 /// - response `tokens` The client ends of each newly created token.
6324 pub fn r#duplicate_sync(
6325 &self,
6326 mut payload: &BufferCollectionTokenDuplicateSyncRequest,
6327 ___deadline: zx::MonotonicInstant,
6328 ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
6329 let _response = self.client.send_query::<
6330 BufferCollectionTokenDuplicateSyncRequest,
6331 fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
6332 >(
6333 payload,
6334 0x1c1af9919d1ca45c,
6335 fidl::encoding::DynamicFlags::FLEXIBLE,
6336 ___deadline,
6337 )?
6338 .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
6339 Ok(_response)
6340 }
6341
6342 /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
6343 /// one, referring to the same buffer collection.
6344 ///
6345 /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6347 ///
6348 /// This method can be used to add a participant, by transferring the newly
6349 /// created token to another participant.
6350 ///
6351 /// This one-way message can be used instead of the two-way
6352 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
6354 /// sysmem to respond to
6355 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
6356 /// client code isn't structured to make it easy to duplicate all the needed
6357 /// tokens at once.
6358 ///
6359 /// After sending one or more `Duplicate` messages, and before sending the
6360 /// newly created child tokens to other participants (or to other
6361 /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
6362 /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
6363 /// `Sync` call can be made on the token, or on the `BufferCollection`
6364 /// obtained by passing this token to `BindSharedCollection`. Either will
6365 /// ensure that the server knows about the tokens created via `Duplicate`
6366 /// before the other participant sends the token to the server via separate
6367 /// `Allocator` channel.
6368 ///
6369 /// All tokens must be turned in via
6370 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6371 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6372 /// successfully allocate buffers.
6373 ///
6374 /// All table fields are currently required.
6375 ///
6376 /// + request `rights_attenuation_mask` The rights bits that are zero in
6377 /// this mask will be absent in the buffer VMO rights obtainable via the
6378 /// client end of `token_request`. This allows an initiator or
6379 /// intermediary participant to attenuate the rights available to a
6380 /// delegate participant. This does not allow a participant to gain rights
6381 /// that the participant doesn't already have. The value
6382 /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
6383 /// should be applied.
6384 /// + These values for rights_attenuation_mask result in no attenuation:
6385 /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
6386 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
6387 /// computed)
6388 /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
6389 /// + request `token_request` is the server end of a `BufferCollectionToken`
6390 /// channel. The client end of this channel acts as another participant in
6391 /// the shared buffer collection.
6392 pub fn r#duplicate(
6393 &self,
6394 mut payload: BufferCollectionTokenDuplicateRequest,
6395 ) -> Result<(), fidl::Error> {
6396 self.client.send::<BufferCollectionTokenDuplicateRequest>(
6397 &mut payload,
6398 0x73e78f92ee7fb887,
6399 fidl::encoding::DynamicFlags::FLEXIBLE,
6400 )
6401 }
6402
6403 /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
6404 ///
6405 /// When the `BufferCollectionToken` is converted to a
6406 /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
6407 /// the `BufferCollection` also.
6408 ///
6409 /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
6410 /// client end without having sent
6411 /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
6413 /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
6414 /// to the root `Node`, which fails the whole buffer collection. In
6415 /// contrast, a dispensable `Node` can fail after buffers are allocated
6416 /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
6418 ///
6419 /// The dispensable `Node` participates in constraints aggregation along
6420 /// with its parent before buffer allocation. If the dispensable `Node`
6421 /// fails before buffers are allocated, the failure propagates to the
6422 /// dispensable `Node`'s parent.
6423 ///
6424 /// After buffers are allocated, failure of the dispensable `Node` (or any
6425 /// child of the dispensable `Node`) does not propagate to the dispensable
6426 /// `Node`'s parent. Failure does propagate from a normal child of a
6427 /// dispensable `Node` to the dispensable `Node`. Failure of a child is
6428 /// blocked from reaching its parent if the child is attached using
6429 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
6430 /// dispensable and the failure occurred after allocation.
6431 ///
6432 /// A dispensable `Node` can be used in cases where a participant needs to
6433 /// provide constraints, but after buffers are allocated, the participant
6434 /// can fail without causing buffer collection failure from the parent
6435 /// `Node`'s point of view.
6436 ///
6437 /// In contrast, `BufferCollection.AttachToken` can be used to create a
6438 /// `BufferCollectionToken` which does not participate in constraints
6439 /// aggregation with its parent `Node`, and whose failure at any time does
6440 /// not propagate to its parent `Node`, and whose potential delay providing
6441 /// constraints does not prevent the parent `Node` from completing its
6442 /// buffer allocation.
6443 ///
6444 /// An initiator (creator of the root `Node` using
6445 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
6446 /// scenarios choose to initially use a dispensable `Node` for a first
6447 /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
6449 /// a `BufferCollectionToken` created with `AttachToken`.
6450 ///
6451 /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
6452 /// shortly before sending the dispensable `BufferCollectionToken` to a
6453 /// delegate participant. Because `SetDispensable` prevents propagation of
6454 /// child `Node` failure to parent `Node`(s), if the client was relying on
6455 /// noticing child failure via failure of the parent `Node` retained by the
6456 /// client, the client may instead need to notice failure via other means.
6457 /// If other means aren't available/convenient, the client can instead
6458 /// retain the dispensable `Node` and create a child `Node` under that to
6459 /// send to the delegate participant, retaining this `Node` in order to
6460 /// notice failure of the subtree rooted at this `Node` via this `Node`'s
6461 /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
6462 /// (e.g. starting a new instance of the delegate participant and handing it
6463 /// a `BufferCollectionToken` created using
6464 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
6465 /// and clean up in a client-specific way).
6466 ///
6467 /// While it is possible (and potentially useful) to `SetDispensable` on a
6468 /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
6469 /// to later replace a failed dispensable `Node` that was a direct child of
6470 /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
6471 /// (since there's no `AttachToken` on a group). Instead, to enable
6472 /// `AttachToken` replacement in this case, create an additional
6473 /// non-dispensable token that's a direct child of the group and make the
6474 /// existing dispensable token a child of the additional token. This way,
6475 /// the additional token that is a direct child of the group has
6476 /// `BufferCollection.AttachToken` which can be used to replace the failed
6477 /// dispensable token.
6478 ///
6479 /// `SetDispensable` on an already-dispensable token is idempotent.
6480 pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
6481 self.client.send::<fidl::encoding::EmptyPayload>(
6482 (),
6483 0x228acf979254df8b,
6484 fidl::encoding::DynamicFlags::FLEXIBLE,
6485 )
6486 }
6487
6488 /// Create a logical OR among a set of tokens, called a
6489 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6490 ///
6491 /// Most sysmem clients and many participants don't need to care about this
6492 /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
6493 /// a participant wants to attempt to include one set of delegate
6494 /// participants, but if constraints don't combine successfully that way,
6495 /// fall back to a different (possibly overlapping) set of delegate
6496 /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
6498 /// across all involved delegate participants). In such cases, a
6499 /// `BufferCollectionTokenGroup` is useful.
6500 ///
6501 /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
6502 /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
6503 /// which are not selected during aggregation will fail (close), which a
6504 /// potential participant should notice when their `BufferCollection`
6505 /// channel client endpoint sees PEER_CLOSED, allowing the participant to
6506 /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
6508 /// allocate a logical buffer collection or later async failure of a buffer
6509 /// collection).
6510 ///
6511 /// See comments on protocol `BufferCollectionTokenGroup`.
6512 ///
6513 /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
6514 /// applied to the whole group can be achieved with a
6515 /// `BufferCollectionToken` for this purpose as a direct parent of the
6516 /// `BufferCollectionTokenGroup`.
6517 ///
6518 /// All table fields are currently required.
6519 ///
6520 /// + request `group_request` The server end of a
6521 /// `BufferCollectionTokenGroup` channel to be served by sysmem.
6522 pub fn r#create_buffer_collection_token_group(
6523 &self,
6524 mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
6525 ) -> Result<(), fidl::Error> {
6526 self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
6527 &mut payload,
6528 0x30f8d48e77bd36f2,
6529 fidl::encoding::DynamicFlags::FLEXIBLE,
6530 )
6531 }
6532}
6533
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenSynchronousProxy> for zx::Handle {
    /// Consumes the synchronous proxy and yields its underlying channel as a
    /// generic handle.
    fn from(value: BufferCollectionTokenSynchronousProxy) -> Self {
        let channel = value.into_channel();
        zx::Handle::from(channel)
    }
}
6540
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenSynchronousProxy::new(value)
    }
}
6547
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenSynchronousProxy {
    type Protocol = BufferCollectionTokenMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
6556
/// Asynchronous proxy for the `fuchsia.sysmem2/BufferCollectionToken` protocol.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenProxy {
    // Underlying async FIDL client; the proxy methods delegate to it, and the
    // `Proxy` impl exposes its channel (see `new`/`from_channel` in this file).
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
6561
6562impl fidl::endpoints::Proxy for BufferCollectionTokenProxy {
6563 type Protocol = BufferCollectionTokenMarker;
6564
6565 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
6566 Self::new(inner)
6567 }
6568
6569 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
6570 self.client.into_channel().map_err(|client| Self { client })
6571 }
6572
6573 fn as_channel(&self) -> &::fidl::AsyncChannel {
6574 self.client.as_channel()
6575 }
6576}
6577
6578impl BufferCollectionTokenProxy {
6579 /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionToken.
6580 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
6581 let protocol_name =
6582 <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
6583 Self { client: fidl::client::Client::new(channel, protocol_name) }
6584 }
6585
6586 /// Get a Stream of events from the remote end of the protocol.
6587 ///
6588 /// # Panics
6589 ///
6590 /// Panics if the event stream was already taken.
6591 pub fn take_event_stream(&self) -> BufferCollectionTokenEventStream {
6592 BufferCollectionTokenEventStream { event_receiver: self.client.take_event_receiver() }
6593 }
6594
6595 /// Ensure that previous messages have been received server side. This is
6596 /// particularly useful after previous messages that created new tokens,
6597 /// because a token must be known to the sysmem server before sending the
6598 /// token to another participant.
6599 ///
6600 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
6601 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
6602 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
6603 /// to mitigate the possibility of a hostile/fake
6604 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
6605 /// Another way is to pass the token to
6606 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
6607 /// the token as part of exchanging it for a
6608 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
6609 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
6610 /// of stalling.
6611 ///
6612 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
6613 /// and then starting and completing a `Sync`, it's then safe to send the
6614 /// `BufferCollectionToken` client ends to other participants knowing the
6615 /// server will recognize the tokens when they're sent by the other
6616 /// participants to sysmem in a
6617 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
6618 /// efficient way to create tokens while avoiding unnecessary round trips.
6619 ///
6620 /// Other options include waiting for each
6621 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
6622 /// individually (using separate call to `Sync` after each), or calling
6623 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
6624 /// converted to a `BufferCollection` via
6625 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
6626 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
6627 /// the sync step and can create multiple tokens at once.
6628 pub fn r#sync(
6629 &self,
6630 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
6631 BufferCollectionTokenProxyInterface::r#sync(self)
6632 }
6633
6634 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
6635 ///
6636 /// Normally a participant will convert a `BufferCollectionToken` into a
6637 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
6638 /// `Release` via the token (and then close the channel immediately or
6639 /// shortly later in response to server closing the server end), which
6640 /// avoids causing buffer collection failure. Without a prior `Release`,
6641 /// closing the `BufferCollectionToken` client end will cause buffer
6642 /// collection failure.
6643 ///
6644 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
6645 ///
6646 /// By default the server handles unexpected closure of a
6647 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
6648 /// first) by failing the buffer collection. Partly this is to expedite
6649 /// closing VMO handles to reclaim memory when any participant fails. If a
6650 /// participant would like to cleanly close a `BufferCollection` without
6651 /// causing buffer collection failure, the participant can send `Release`
6652 /// before closing the `BufferCollection` client end. The `Release` can
6653 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
6654 /// buffer collection won't require constraints from this node in order to
6655 /// allocate. If after `SetConstraints`, the constraints are retained and
6656 /// aggregated, despite the lack of `BufferCollection` connection at the
6657 /// time of constraints aggregation.
6658 ///
6659 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
6660 ///
6661 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
6662 /// end (without `Release` first) will trigger failure of the buffer
6663 /// collection. To close a `BufferCollectionTokenGroup` channel without
6664 /// failing the buffer collection, ensure that AllChildrenPresent() has been
6665 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
6666 /// client end.
6667 ///
6668 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
6670 /// buffer collection will fail (triggered by reception of `Release` without
6671 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
6672 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
6673 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
6674 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
6675 /// close requires `AllChildrenPresent` (if not already sent), then
6676 /// `Release`, then close client end.
6677 ///
6678 /// If `Release` occurs after `AllChildrenPresent`, the children and all
6679 /// their constraints remain intact (just as they would if the
6680 /// `BufferCollectionTokenGroup` channel had remained open), and the client
6681 /// end close doesn't trigger buffer collection failure.
6682 ///
6683 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
6684 ///
6685 /// For brevity, the per-channel-protocol paragraphs above ignore the
6686 /// separate failure domain created by
6687 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
6688 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
6689 /// unexpectedly closes (without `Release` first) and that client end is
6690 /// under a failure domain, instead of failing the whole buffer collection,
6691 /// the failure domain is failed, but the buffer collection itself is
6692 /// isolated from failure of the failure domain. Such failure domains can be
6693 /// nested, in which case only the inner-most failure domain in which the
6694 /// `Node` resides fails.
6695 pub fn r#release(&self) -> Result<(), fidl::Error> {
6696 BufferCollectionTokenProxyInterface::r#release(self)
6697 }
6698
6699 /// Set a name for VMOs in this buffer collection.
6700 ///
6701 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
6702 /// will be truncated to fit. The name of the vmo will be suffixed with the
6703 /// buffer index within the collection (if the suffix fits within
6704 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
6705 /// listed in the inspect data.
6706 ///
6707 /// The name only affects VMOs allocated after the name is set; this call
6708 /// does not rename existing VMOs. If multiple clients set different names
6709 /// then the larger priority value will win. Setting a new name with the
6710 /// same priority as a prior name doesn't change the name.
6711 ///
6712 /// All table fields are currently required.
6713 ///
6714 /// + request `priority` The name is only set if this is the first `SetName`
6715 /// or if `priority` is greater than any previous `priority` value in
6716 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
6717 /// + request `name` The name for VMOs created under this buffer collection.
6718 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
6719 BufferCollectionTokenProxyInterface::r#set_name(self, payload)
6720 }
6721
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_debug_client_info(self, payload)
    }
6757
    /// Sysmem logs a warning if sysmem hasn't seen
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
    /// within 5 seconds after creation of a new collection.
    ///
    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
    ///
    /// In most cases the default works well.
    ///
    /// All table fields are currently required.
    ///
    /// + request `deadline` The time at which sysmem will start trying to log
    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
6778
6779 /// This enables verbose logging for the buffer collection.
6780 ///
6781 /// Verbose logging includes constraints set via
6782 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
6783 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
6784 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
6785 /// the tree of `Node`(s).
6786 ///
6787 /// Normally sysmem prints only a single line complaint when aggregation
6788 /// fails, with just the specific detailed reason that aggregation failed,
6789 /// with little surrounding context. While this is often enough to diagnose
6790 /// a problem if only a small change was made and everything was working
6791 /// before the small change, it's often not particularly helpful for getting
6792 /// a new buffer collection to work for the first time. Especially with
6793 /// more complex trees of nodes, involving things like
6794 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6795 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6796 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6797 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6798 /// looks like and why it's failing a logical allocation, or why a tree or
6799 /// subtree is failing sooner than expected.
6800 ///
6801 /// The intent of the extra logging is to be acceptable from a performance
6802 /// point of view, under the assumption that verbose logging is only enabled
6803 /// on a low number of buffer collections. If we're not tracking down a bug,
6804 /// we shouldn't send this message.
6805 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6806 BufferCollectionTokenProxyInterface::r#set_verbose_logging(self)
6807 }
6808
6809 /// This gets a handle that can be used as a parameter to
6810 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6811 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6812 /// client obtained this handle from this `Node`.
6813 ///
6814 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6815 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6816 /// despite the two calls typically being on different channels.
6817 ///
6818 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6819 ///
6820 /// All table fields are currently required.
6821 ///
6822 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6823 /// different `Node` channel, to prove that the client obtained the handle
6824 /// from this `Node`.
6825 pub fn r#get_node_ref(
6826 &self,
6827 ) -> fidl::client::QueryResponseFut<
6828 NodeGetNodeRefResponse,
6829 fidl::encoding::DefaultFuchsiaResourceDialect,
6830 > {
6831 BufferCollectionTokenProxyInterface::r#get_node_ref(self)
6832 }
6833
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem). Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both. This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of whether
    ///     `Release`ed). This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call. However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        BufferCollectionTokenProxyInterface::r#is_alternate_for(self, payload)
    }
6904
6905 /// Get the buffer collection ID. This ID is also available from
6906 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6907 /// within the collection).
6908 ///
6909 /// This call is mainly useful in situations where we can't convey a
6910 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6911 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6912 /// handle, which can be joined back up with a `BufferCollection` client end
6913 /// that was created via a different path. Prefer to convey a
6914 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6915 ///
6916 /// Trusting a `buffer_collection_id` value from a source other than sysmem
6917 /// is analogous to trusting a koid value from a source other than zircon.
6918 /// Both should be avoided unless really necessary, and both require
6919 /// caution. In some situations it may be reasonable to refer to a
6920 /// pre-established `BufferCollection` by `buffer_collection_id` via a
6921 /// protocol for efficiency reasons, but an incoming value purporting to be
6922 /// a `buffer_collection_id` is not sufficient alone to justify granting the
6923 /// sender of the `buffer_collection_id` any capability. The sender must
6924 /// first prove to a receiver that the sender has/had a VMO or has/had a
6925 /// `BufferCollectionToken` to the same collection by sending a handle that
6926 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6927 /// `buffer_collection_id` value. The receiver should take care to avoid
6928 /// assuming that a sender had a `BufferCollectionToken` in cases where the
6929 /// sender has only proven that the sender had a VMO.
6930 ///
6931 /// - response `buffer_collection_id` This ID is unique per buffer
6932 /// collection per boot. Each buffer is uniquely identified by the
6933 /// `buffer_collection_id` and `buffer_index` together.
6934 pub fn r#get_buffer_collection_id(
6935 &self,
6936 ) -> fidl::client::QueryResponseFut<
6937 NodeGetBufferCollectionIdResponse,
6938 fidl::encoding::DefaultFuchsiaResourceDialect,
6939 > {
6940 BufferCollectionTokenProxyInterface::r#get_buffer_collection_id(self)
6941 }
6942
6943 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6944 /// created after this message to weak, which means that a client's `Node`
6945 /// client end (or a child created after this message) is not alone
6946 /// sufficient to keep allocated VMOs alive.
6947 ///
6948 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6949 /// `close_weak_asap`.
6950 ///
6951 /// This message is only permitted before the `Node` becomes ready for
6952 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6953 /// * `BufferCollectionToken`: any time
6954 /// * `BufferCollection`: before `SetConstraints`
6955 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6956 ///
6957 /// Currently, no conversion from strong `Node` to weak `Node` after ready
6958 /// for allocation is provided, but a client can simulate that by creating
6959 /// an additional `Node` before allocation and setting that additional
6960 /// `Node` to weak, and then potentially at some point later sending
6961 /// `Release` and closing the client end of the client's strong `Node`, but
6962 /// keeping the client's weak `Node`.
6963 ///
6964 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6965 /// collection failure (all `Node` client end(s) will see
6966 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6967 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6968 /// this situation until all `Node`(s) are ready for allocation. For initial
6969 /// allocation to succeed, at least one strong `Node` is required to exist
6970 /// at allocation time, but after that client receives VMO handles, that
6971 /// client can `BufferCollection.Release` and close the client end without
6972 /// causing this type of failure.
6973 ///
6974 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6975 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6976 /// separately as appropriate.
6977 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6978 BufferCollectionTokenProxyInterface::r#set_weak(self)
6979 }
6980
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendants of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_weak_ok(self, payload)
    }
7033
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#attach_node_tracking(self, payload)
    }
7063
    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
    /// one, referring to the same buffer collection.
    ///
    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add more participants, by transferring the
    /// newly created tokens to additional participants.
    ///
    /// A new token will be returned for each entry in the
    /// `rights_attenuation_masks` array.
    ///
    /// If the called token may not actually be a valid token due to a
    /// potentially hostile/untrusted provider of the token, consider using
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
    /// due to the calling token not being a real token.
    ///
    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
    /// method, because the sync step is included in this call, at the cost of a
    /// round trip during this call.
    ///
    /// All tokens must be turned in to sysmem via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers (or to logically allocate buffers in the
    /// case of subtrees involving
    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` In each entry of
    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
    ///   in the buffer VMO rights obtainable via the corresponding returned
    ///   token. This allows an initiator or intermediary participant to
    ///   attenuate the rights available to a participant. This does not allow a
    ///   participant to gain rights that the participant doesn't already have.
    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
    ///   attenuation should be applied.
    /// - response `tokens` The client ends of each newly created token.
    pub fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        BufferCollectionTokenProxyInterface::r#duplicate_sync(self, payload)
    }
7115
    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
    /// one, referring to the same buffer collection.
    ///
    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
    ///
    /// This method can be used to add a participant, by transferring the newly
    /// created token to another participant.
    ///
    /// This one-way message can be used instead of the two-way
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
    /// sysmem to respond to
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
    /// client code isn't structured to make it easy to duplicate all the needed
    /// tokens at once.
    ///
    /// After sending one or more `Duplicate` messages, and before sending the
    /// newly created child tokens to other participants (or to other
    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
    /// `Sync` call can be made on the token, or on the `BufferCollection`
    /// obtained by passing this token to `BindSharedCollection`. Either will
    /// ensure that the server knows about the tokens created via `Duplicate`
    /// before the other participant sends the token to the server via separate
    /// `Allocator` channel.
    ///
    /// All tokens must be turned in via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
    /// successfully allocate buffers.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` The rights bits that are zero in
    ///   this mask will be absent in the buffer VMO rights obtainable via the
    ///   client end of `token_request`. This allows an initiator or
    ///   intermediary participant to attenuate the rights available to a
    ///   delegate participant. This does not allow a participant to gain rights
    ///   that the participant doesn't already have. The value
    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
    ///   should be applied.
    ///   + These values for rights_attenuation_mask result in no attenuation:
    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
    ///       computed)
    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
    /// + request `token_request` is the server end of a `BufferCollectionToken`
    ///   channel. The client end of this channel acts as another participant in
    ///   the shared buffer collection.
    pub fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#duplicate(self, payload)
    }
7172
    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
    ///
    /// When the `BufferCollectionToken` is converted to a
    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
    /// the `BufferCollection` also.
    ///
    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
    /// client end without having sent
    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
    /// to the root `Node`, which fails the whole buffer collection. In
    /// contrast, a dispensable `Node` can fail after buffers are allocated
    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
    ///
    /// The dispensable `Node` participates in constraints aggregation along
    /// with its parent before buffer allocation. If the dispensable `Node`
    /// fails before buffers are allocated, the failure propagates to the
    /// dispensable `Node`'s parent.
    ///
    /// After buffers are allocated, failure of the dispensable `Node` (or any
    /// child of the dispensable `Node`) does not propagate to the dispensable
    /// `Node`'s parent. Failure does propagate from a normal child of a
    /// dispensable `Node` to the dispensable `Node`. Failure of a child is
    /// blocked from reaching its parent if the child is attached using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
    /// dispensable and the failure occurred after allocation.
    ///
    /// A dispensable `Node` can be used in cases where a participant needs to
    /// provide constraints, but after buffers are allocated, the participant
    /// can fail without causing buffer collection failure from the parent
    /// `Node`'s point of view.
    ///
    /// In contrast, `BufferCollection.AttachToken` can be used to create a
    /// `BufferCollectionToken` which does not participate in constraints
    /// aggregation with its parent `Node`, and whose failure at any time does
    /// not propagate to its parent `Node`, and whose potential delay providing
    /// constraints does not prevent the parent `Node` from completing its
    /// buffer allocation.
    ///
    /// An initiator (creator of the root `Node` using
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
    /// scenarios choose to initially use a dispensable `Node` for a first
    /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
    /// a `BufferCollectionToken` created with `AttachToken`.
    ///
    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
    /// shortly before sending the dispensable `BufferCollectionToken` to a
    /// delegate participant. Because `SetDispensable` prevents propagation of
    /// child `Node` failure to parent `Node`(s), if the client was relying on
    /// noticing child failure via failure of the parent `Node` retained by the
    /// client, the client may instead need to notice failure via other means.
    /// If other means aren't available/convenient, the client can instead
    /// retain the dispensable `Node` and create a child `Node` under that to
    /// send to the delegate participant, retaining this `Node` in order to
    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
    /// (e.g. starting a new instance of the delegate participant and handing it
    /// a `BufferCollectionToken` created using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
    /// and clean up in a client-specific way).
    ///
    /// While it is possible (and potentially useful) to `SetDispensable` on a
    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
    /// to later replace a failed dispensable `Node` that was a direct child of
    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
    /// (since there's no `AttachToken` on a group). Instead, to enable
    /// `AttachToken` replacement in this case, create an additional
    /// non-dispensable token that's a direct child of the group and make the
    /// existing dispensable token a child of the additional token. This way,
    /// the additional token that is a direct child of the group has
    /// `BufferCollection.AttachToken` which can be used to replace the failed
    /// dispensable token.
    ///
    /// `SetDispensable` on an already-dispensable token is idempotent.
    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#set_dispensable(self)
    }
7253
    /// Create a logical OR among a set of tokens, called a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    ///
    /// Most sysmem clients and many participants don't need to care about this
    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
    /// a participant wants to attempt to include one set of delegate
    /// participants, but if constraints don't combine successfully that way,
    /// fall back to a different (possibly overlapping) set of delegate
    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
    /// across all involved delegate participants). In such cases, a
    /// `BufferCollectionTokenGroup` is useful.
    ///
    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
    /// which are not selected during aggregation will fail (close), which a
    /// potential participant should notice when their `BufferCollection`
    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
    /// allocate a logical buffer collection or later async failure of a buffer
    /// collection).
    ///
    /// See comments on protocol `BufferCollectionTokenGroup`.
    ///
    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
    /// applied to the whole group can be achieved with a
    /// `BufferCollectionToken` for this purpose as a direct parent of the
    /// `BufferCollectionTokenGroup`.
    ///
    /// All table fields are currently required.
    ///
    /// + request `group_request` The server end of a
    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
    pub fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#create_buffer_collection_token_group(self, payload)
    }
7294}
7295
// Client-side message plumbing for the `BufferCollectionToken` protocol.
// Every method encodes its request with `DynamicFlags::FLEXIBLE` and the
// method's FIDL ordinal; two-way methods additionally install a `_decode`
// helper that validates the response body against that same ordinal.
impl BufferCollectionTokenProxyInterface for BufferCollectionTokenProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the (empty) `Sync` response body.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way: encode and send, no response is expected.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            // `FlexibleResultType` (not `FlexibleType`): this method can also
            // return a domain `Error`.
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type DuplicateSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1c1af9919d1ca45c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenDuplicateSyncRequest,
            BufferCollectionTokenDuplicateSyncResponse,
        >(
            payload,
            0x1c1af9919d1ca45c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenDuplicateRequest>(
            &mut payload,
            0x73e78f92ee7fb887,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x228acf979254df8b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
            &mut payload,
            0x30f8d48e77bd36f2,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
7530
/// A Stream of incoming events for fuchsia.sysmem2/BufferCollectionToken.
pub struct BufferCollectionTokenEventStream {
    // Raw event buffers from the channel; each is decoded in `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
7534
// The stream holds only an event receiver (no self-referential state), so it
// can opt into `Unpin`.
impl std::marker::Unpin for BufferCollectionTokenEventStream {}
7536
impl futures::stream::FusedStream for BufferCollectionTokenEventStream {
    /// Reports whether the underlying event receiver has terminated; per the
    /// `FusedStream` contract, a terminated stream yields no further items.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
7542
7543impl futures::Stream for BufferCollectionTokenEventStream {
7544 type Item = Result<BufferCollectionTokenEvent, fidl::Error>;
7545
7546 fn poll_next(
7547 mut self: std::pin::Pin<&mut Self>,
7548 cx: &mut std::task::Context<'_>,
7549 ) -> std::task::Poll<Option<Self::Item>> {
7550 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
7551 &mut self.event_receiver,
7552 cx
7553 )?) {
7554 Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenEvent::decode(buf))),
7555 None => std::task::Poll::Ready(None),
7556 }
7557 }
7558}
7559
/// An event received on a `BufferCollectionToken` channel.
///
/// No concrete events are defined here; the only variant is the catch-all
/// for flexible events with unrecognized ordinals.
#[derive(Debug)]
pub enum BufferCollectionTokenEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
7568
7569impl BufferCollectionTokenEvent {
7570 /// Decodes a message buffer as a [`BufferCollectionTokenEvent`].
7571 fn decode(
7572 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
7573 ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
7574 let (bytes, _handles) = buf.split_mut();
7575 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7576 debug_assert_eq!(tx_header.tx_id, 0);
7577 match tx_header.ordinal {
7578 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7579 Ok(BufferCollectionTokenEvent::_UnknownEvent { ordinal: tx_header.ordinal })
7580 }
7581 _ => Err(fidl::Error::UnknownOrdinal {
7582 ordinal: tx_header.ordinal,
7583 protocol_name:
7584 <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7585 }),
7586 }
7587 }
7588}
7589
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionToken.
pub struct BufferCollectionTokenRequestStream {
    // Shared server state (channel + shutdown tracking), also referenced by
    // control handles and responders created from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel is closed or shut down; polling afterwards panics.
    is_terminated: bool,
}
7595
// The request stream holds only an `Arc` and a flag (no self-referential
// state), so it can opt into `Unpin`.
impl std::marker::Unpin for BufferCollectionTokenRequestStream {}
7597
impl futures::stream::FusedStream for BufferCollectionTokenRequestStream {
    /// True once the stream has observed channel closure or shutdown; per the
    /// `FusedStream` contract, a terminated stream yields no further items.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
7603
7604impl fidl::endpoints::RequestStream for BufferCollectionTokenRequestStream {
7605 type Protocol = BufferCollectionTokenMarker;
7606 type ControlHandle = BufferCollectionTokenControlHandle;
7607
7608 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
7609 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
7610 }
7611
7612 fn control_handle(&self) -> Self::ControlHandle {
7613 BufferCollectionTokenControlHandle { inner: self.inner.clone() }
7614 }
7615
7616 fn into_inner(
7617 self,
7618 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
7619 {
7620 (self.inner, self.is_terminated)
7621 }
7622
7623 fn from_inner(
7624 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
7625 is_terminated: bool,
7626 ) -> Self {
7627 Self { inner, is_terminated }
7628 }
7629}
7630
impl futures::Stream for BufferCollectionTokenRequestStream {
    type Item = Result<BufferCollectionTokenRequest, fidl::Error>;

    /// Reads the next message off the channel and dispatches on its FIDL
    /// ordinal, producing a decoded [`BufferCollectionTokenRequest`].
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a server-initiated shutdown before touching the channel.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled BufferCollectionTokenRequestStream after completion");
        }
        // Decode into thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // PEER_CLOSED terminates the stream cleanly rather than
                    // surfacing an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal. One-way methods yield the
                // request with a plain control handle; two-way methods wrap
                // the control handle in a responder carrying the tx_id.
                std::task::Poll::Ready(Some(match header.ordinal {
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Sync {
                            responder: BufferCollectionTokenSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Release {
                            control_handle,
                        })
                    }
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetName {payload: req,
                            control_handle,
                        })
                    }
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDebugClientInfo {payload: req,
                            control_handle,
                        })
                    }
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {payload: req,
                            control_handle,
                        })
                    }
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetVerboseLogging {
                            control_handle,
                        })
                    }
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::GetNodeRef {
                            responder: BufferCollectionTokenGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::IsAlternateFor {payload: req,
                            responder: BufferCollectionTokenIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::GetBufferCollectionId {
                            responder: BufferCollectionTokenGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetWeak {
                            control_handle,
                        })
                    }
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetWeakOk {payload: req,
                            control_handle,
                        })
                    }
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::AttachNodeTracking {payload: req,
                            control_handle,
                        })
                    }
                    0x1c1af9919d1ca45c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::DuplicateSync {payload: req,
                            responder: BufferCollectionTokenDuplicateSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x73e78f92ee7fb887 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::Duplicate {payload: req,
                            control_handle,
                        })
                    }
                    0x228acf979254df8b => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::SetDispensable {
                            control_handle,
                        })
                    }
                    0x30f8d48e77bd36f2 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenCreateBufferCollectionTokenGroupRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {payload: req,
                            control_handle,
                        })
                    }
                    // Unknown flexible one-way method: surface as
                    // `_UnknownMethod` without replying.
                    _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        Ok(BufferCollectionTokenRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error so the client's pending call completes.
                    _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionTokenRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
7887
7888/// A [`fuchsia.sysmem2/BufferCollectionToken`] is not a buffer collection, but
7889/// rather is a way to identify a specific potential shared buffer collection,
7890/// and a way to distribute that potential shared buffer collection to
7891/// additional participants prior to the buffer collection allocating any
7892/// buffers.
7893///
7894/// Epitaphs are not used in this protocol.
7895///
7896/// We use a channel for the `BufferCollectionToken` instead of a single
7897/// `eventpair` (pair) because this way we can detect error conditions like a
7898/// participant failing mid-create.
7899#[derive(Debug)]
7900pub enum BufferCollectionTokenRequest {
7901 /// Ensure that previous messages have been received server side. This is
7902 /// particularly useful after previous messages that created new tokens,
7903 /// because a token must be known to the sysmem server before sending the
7904 /// token to another participant.
7905 ///
7906 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
7907 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
7908 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
7909 /// to mitigate the possibility of a hostile/fake
7910 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
7911 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
7913 /// the token as part of exchanging it for a
7914 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
7915 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
7916 /// of stalling.
7917 ///
7918 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
7919 /// and then starting and completing a `Sync`, it's then safe to send the
7920 /// `BufferCollectionToken` client ends to other participants knowing the
7921 /// server will recognize the tokens when they're sent by the other
7922 /// participants to sysmem in a
7923 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
7924 /// efficient way to create tokens while avoiding unnecessary round trips.
7925 ///
7926 /// Other options include waiting for each
7927 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
7928 /// individually (using separate call to `Sync` after each), or calling
7929 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
7930 /// converted to a `BufferCollection` via
7931 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
7932 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
7933 /// the sync step and can create multiple tokens at once.
7934 Sync { responder: BufferCollectionTokenSyncResponder },
7935 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
7936 ///
7937 /// Normally a participant will convert a `BufferCollectionToken` into a
7938 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
7939 /// `Release` via the token (and then close the channel immediately or
7940 /// shortly later in response to server closing the server end), which
7941 /// avoids causing buffer collection failure. Without a prior `Release`,
7942 /// closing the `BufferCollectionToken` client end will cause buffer
7943 /// collection failure.
7944 ///
7945 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
7946 ///
7947 /// By default the server handles unexpected closure of a
7948 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
7949 /// first) by failing the buffer collection. Partly this is to expedite
7950 /// closing VMO handles to reclaim memory when any participant fails. If a
7951 /// participant would like to cleanly close a `BufferCollection` without
7952 /// causing buffer collection failure, the participant can send `Release`
7953 /// before closing the `BufferCollection` client end. The `Release` can
7954 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
7955 /// buffer collection won't require constraints from this node in order to
7956 /// allocate. If after `SetConstraints`, the constraints are retained and
7957 /// aggregated, despite the lack of `BufferCollection` connection at the
7958 /// time of constraints aggregation.
7959 ///
7960 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
7961 ///
7962 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
7963 /// end (without `Release` first) will trigger failure of the buffer
7964 /// collection. To close a `BufferCollectionTokenGroup` channel without
7965 /// failing the buffer collection, ensure that AllChildrenPresent() has been
7966 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
7967 /// client end.
7968 ///
7969 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
7971 /// buffer collection will fail (triggered by reception of `Release` without
7972 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
7973 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
7974 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
7975 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
7976 /// close requires `AllChildrenPresent` (if not already sent), then
7977 /// `Release`, then close client end.
7978 ///
7979 /// If `Release` occurs after `AllChildrenPresent`, the children and all
7980 /// their constraints remain intact (just as they would if the
7981 /// `BufferCollectionTokenGroup` channel had remained open), and the client
7982 /// end close doesn't trigger buffer collection failure.
7983 ///
7984 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
7985 ///
7986 /// For brevity, the per-channel-protocol paragraphs above ignore the
7987 /// separate failure domain created by
7988 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
7989 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
7990 /// unexpectedly closes (without `Release` first) and that client end is
7991 /// under a failure domain, instead of failing the whole buffer collection,
7992 /// the failure domain is failed, but the buffer collection itself is
7993 /// isolated from failure of the failure domain. Such failure domains can be
7994 /// nested, in which case only the inner-most failure domain in which the
7995 /// `Node` resides fails.
7996 Release { control_handle: BufferCollectionTokenControlHandle },
7997 /// Set a name for VMOs in this buffer collection.
7998 ///
7999 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
8000 /// will be truncated to fit. The name of the vmo will be suffixed with the
8001 /// buffer index within the collection (if the suffix fits within
8002 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
8003 /// listed in the inspect data.
8004 ///
8005 /// The name only affects VMOs allocated after the name is set; this call
8006 /// does not rename existing VMOs. If multiple clients set different names
8007 /// then the larger priority value will win. Setting a new name with the
8008 /// same priority as a prior name doesn't change the name.
8009 ///
8010 /// All table fields are currently required.
8011 ///
8012 /// + request `priority` The name is only set if this is the first `SetName`
8013 /// or if `priority` is greater than any previous `priority` value in
8014 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
8015 /// + request `name` The name for VMOs created under this buffer collection.
8016 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenControlHandle },
8017 /// Set information about the current client that can be used by sysmem to
8018 /// help diagnose leaking memory and allocation stalls waiting for a
8019 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
8020 ///
8021 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
8022 /// `Node`(s) derived from this `Node`, unless overriden by
8023 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
8024 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
8025 ///
8026 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
8027 /// `Allocator` is the most efficient way to ensure that all
8028 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
8029 /// set, and is also more efficient than separately sending the same debug
8030 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
8031 /// created [`fuchsia.sysmem2/Node`].
8032 ///
8033 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
8034 /// indicate which client is closing their channel first, leading to subtree
8035 /// failure (which can be normal if the purpose of the subtree is over, but
8036 /// if happening earlier than expected, the client-channel-specific name can
8037 /// help diagnose where the failure is first coming from, from sysmem's
8038 /// point of view).
8039 ///
8040 /// All table fields are currently required.
8041 ///
8042 /// + request `name` This can be an arbitrary string, but the current
8043 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
8044 /// + request `id` This can be an arbitrary id, but the current process ID
8045 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
8046 SetDebugClientInfo {
8047 payload: NodeSetDebugClientInfoRequest,
8048 control_handle: BufferCollectionTokenControlHandle,
8049 },
8050 /// Sysmem logs a warning if sysmem hasn't seen
8051 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
8052 /// within 5 seconds after creation of a new collection.
8053 ///
8054 /// Clients can call this method to change when the log is printed. If
8055 /// multiple client set the deadline, it's unspecified which deadline will
8056 /// take effect.
8057 ///
8058 /// In most cases the default works well.
8059 ///
8060 /// All table fields are currently required.
8061 ///
8062 /// + request `deadline` The time at which sysmem will start trying to log
8063 /// the warning, unless all constraints are with sysmem by then.
8064 SetDebugTimeoutLogDeadline {
8065 payload: NodeSetDebugTimeoutLogDeadlineRequest,
8066 control_handle: BufferCollectionTokenControlHandle,
8067 },
8068 /// This enables verbose logging for the buffer collection.
8069 ///
8070 /// Verbose logging includes constraints set via
8071 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
8072 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
8073 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
8074 /// the tree of `Node`(s).
8075 ///
8076 /// Normally sysmem prints only a single line complaint when aggregation
8077 /// fails, with just the specific detailed reason that aggregation failed,
8078 /// with little surrounding context. While this is often enough to diagnose
8079 /// a problem if only a small change was made and everything was working
8080 /// before the small change, it's often not particularly helpful for getting
8081 /// a new buffer collection to work for the first time. Especially with
8082 /// more complex trees of nodes, involving things like
8083 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
8084 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
8085 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
8086 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
8087 /// looks like and why it's failing a logical allocation, or why a tree or
8088 /// subtree is failing sooner than expected.
8089 ///
8090 /// The intent of the extra logging is to be acceptable from a performance
8091 /// point of view, under the assumption that verbose logging is only enabled
8092 /// on a low number of buffer collections. If we're not tracking down a bug,
8093 /// we shouldn't send this message.
8094 SetVerboseLogging { control_handle: BufferCollectionTokenControlHandle },
8095 /// This gets a handle that can be used as a parameter to
8096 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
8097 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
8098 /// client obtained this handle from this `Node`.
8099 ///
8100 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
8101 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
8102 /// despite the two calls typically being on different channels.
8103 ///
8104 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
8105 ///
8106 /// All table fields are currently required.
8107 ///
8108 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
8109 /// different `Node` channel, to prove that the client obtained the handle
8110 /// from this `Node`.
8111 GetNodeRef { responder: BufferCollectionTokenGetNodeRefResponder },
8112 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
8113 /// rooted at a different child token of a common parent
8114 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
8115 /// passed-in `node_ref`.
8116 ///
8117 /// This call is for assisting with admission control de-duplication, and
8118 /// with debugging.
8119 ///
8120 /// The `node_ref` must be obtained using
8121 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
8122 ///
8123 /// The `node_ref` can be a duplicated handle; it's not necessary to call
8124 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
8125 ///
8126 /// If a calling token may not actually be a valid token at all due to a
8127 /// potentially hostile/untrusted provider of the token, call
8128 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8129 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
8130 /// never responds due to a calling token not being a real token (not really
8131 /// talking to sysmem). Another option is to call
8132 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
8133 /// which also validates the token along with converting it to a
8134 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
8135 ///
8136 /// All table fields are currently required.
8137 ///
8138 /// - response `is_alternate`
8139 /// - true: The first parent node in common between the calling node and
8140 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
8141 /// that the calling `Node` and the `node_ref` `Node` will not have both
8142 /// their constraints apply - rather sysmem will choose one or the other
8143 /// of the constraints - never both. This is because only one child of
8144 /// a `BufferCollectionTokenGroup` is selected during logical
8145 /// allocation, with only that one child's subtree contributing to
8146 /// constraints aggregation.
8147 /// - false: The first parent node in common between the calling `Node`
8148 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
8149 /// Currently, this means the first parent node in common is a
8150 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
8151 /// `Release`ed). This means that the calling `Node` and the `node_ref`
8152 /// `Node` may have both their constraints apply during constraints
8153 /// aggregation of the logical allocation, if both `Node`(s) are
8154 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
8155 /// this case, there is no `BufferCollectionTokenGroup` that will
8156 /// directly prevent the two `Node`(s) from both being selected and
8157 /// their constraints both aggregated, but even when false, one or both
8158 /// `Node`(s) may still be eliminated from consideration if one or both
8159 /// `Node`(s) has a direct or indirect parent
8160 /// `BufferCollectionTokenGroup` which selects a child subtree other
8161 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
8162 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
8163 /// associated with the same buffer collection as the calling `Node`.
8164 /// Another reason for this error is if the `node_ref` is an
8165 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
8166 /// a real `node_ref` obtained from `GetNodeRef`.
8167 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
8168 /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
8169 /// the needed rights expected on a real `node_ref`.
8170 /// * No other failing status codes are returned by this call. However,
8171 /// sysmem may add additional codes in future, so the client should have
8172 /// sensible default handling for any failing status code.
8173 IsAlternateFor {
8174 payload: NodeIsAlternateForRequest,
8175 responder: BufferCollectionTokenIsAlternateForResponder,
8176 },
8177 /// Get the buffer collection ID. This ID is also available from
8178 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
8179 /// within the collection).
8180 ///
8181 /// This call is mainly useful in situations where we can't convey a
8182 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
8183 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
8184 /// handle, which can be joined back up with a `BufferCollection` client end
8185 /// that was created via a different path. Prefer to convey a
8186 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
8187 ///
8188 /// Trusting a `buffer_collection_id` value from a source other than sysmem
8189 /// is analogous to trusting a koid value from a source other than zircon.
8190 /// Both should be avoided unless really necessary, and both require
8191 /// caution. In some situations it may be reasonable to refer to a
8192 /// pre-established `BufferCollection` by `buffer_collection_id` via a
8193 /// protocol for efficiency reasons, but an incoming value purporting to be
8194 /// a `buffer_collection_id` is not sufficient alone to justify granting the
8195 /// sender of the `buffer_collection_id` any capability. The sender must
8196 /// first prove to a receiver that the sender has/had a VMO or has/had a
8197 /// `BufferCollectionToken` to the same collection by sending a handle that
8198 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
8199 /// `buffer_collection_id` value. The receiver should take care to avoid
8200 /// assuming that a sender had a `BufferCollectionToken` in cases where the
8201 /// sender has only proven that the sender had a VMO.
8202 ///
8203 /// - response `buffer_collection_id` This ID is unique per buffer
8204 /// collection per boot. Each buffer is uniquely identified by the
8205 /// `buffer_collection_id` and `buffer_index` together.
8206 GetBufferCollectionId { responder: BufferCollectionTokenGetBufferCollectionIdResponder },
8207 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
8208 /// created after this message to weak, which means that a client's `Node`
8209 /// client end (or a child created after this message) is not alone
8210 /// sufficient to keep allocated VMOs alive.
8211 ///
8212 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
8213 /// `close_weak_asap`.
8214 ///
8215 /// This message is only permitted before the `Node` becomes ready for
8216 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
8217 /// * `BufferCollectionToken`: any time
8218 /// * `BufferCollection`: before `SetConstraints`
8219 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
8220 ///
8221 /// Currently, no conversion from strong `Node` to weak `Node` after ready
8222 /// for allocation is provided, but a client can simulate that by creating
8223 /// an additional `Node` before allocation and setting that additional
8224 /// `Node` to weak, and then potentially at some point later sending
8225 /// `Release` and closing the client end of the client's strong `Node`, but
8226 /// keeping the client's weak `Node`.
8227 ///
8228 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
8229 /// collection failure (all `Node` client end(s) will see
8230 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
8231 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
8232 /// this situation until all `Node`(s) are ready for allocation. For initial
8233 /// allocation to succeed, at least one strong `Node` is required to exist
8234 /// at allocation time, but after that client receives VMO handles, that
8235 /// client can `BufferCollection.Release` and close the client end without
8236 /// causing this type of failure.
8237 ///
8238 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
8239 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
8240 /// separately as appropriate.
8241 SetWeak { control_handle: BufferCollectionTokenControlHandle },
8242 /// This indicates to sysmem that the client is prepared to pay attention to
8243 /// `close_weak_asap`.
8244 ///
8245 /// If sent, this message must be before
8246 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
8247 ///
8248 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
8249 /// send this message before `WaitForAllBuffersAllocated`, or a parent
8250 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
8251 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
8252 /// trigger buffer collection failure.
8253 ///
8254 /// This message is necessary because weak sysmem VMOs have not always been
8255 /// a thing, so older clients are not aware of the need to pay attention to
8256 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
8257 /// sysmem weak VMO handles asap. By having this message and requiring
8258 /// participants to indicate their acceptance of this aspect of the overall
8259 /// protocol, we avoid situations where an older client is delivered a weak
8260 /// VMO without any way for sysmem to get that VMO to close quickly later
8261 /// (and on a per-buffer basis).
8262 ///
8263 /// A participant that doesn't handle `close_weak_asap` and also doesn't
8264 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
8265 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
8266 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
8267 /// same participant has a child/delegate which does retrieve VMOs, that
8268 /// child/delegate will need to send `SetWeakOk` before
8269 /// `WaitForAllBuffersAllocated`.
8270 ///
8271 /// + request `for_child_nodes_also` If present and true, this means direct
8272 /// child nodes of this node created after this message plus all
8273 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
8274 /// those nodes. Any child node of this node that was created before this
8275 /// message is not included. This setting is "sticky" in the sense that a
8276 /// subsequent `SetWeakOk` without this bool set to true does not reset
8277 /// the server-side bool. If this creates a problem for a participant, a
8278 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
8279 /// tokens instead, as appropriate. A participant should only set
8280 /// `for_child_nodes_also` true if the participant can really promise to
8281 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
8282 /// weak VMO handles held by participants holding the corresponding child
8283 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
8284 /// which are using sysmem(1) can be weak, despite the clients of those
8285 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
8286 /// direct way to find out about `close_weak_asap`. This only applies to
8287 /// descendents of this `Node` which are using sysmem(1), not to this
8288 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
8289 /// token, which will fail allocation unless an ancestor of this `Node`
8290 /// specified `for_child_nodes_also` true.
8291 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionTokenControlHandle },
8292 /// The server_end will be closed after this `Node` and any child nodes have
8293 /// have released their buffer counts, making those counts available for
8294 /// reservation by a different `Node` via
8295 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
8296 ///
8297 /// The `Node` buffer counts may not be released until the entire tree of
8298 /// `Node`(s) is closed or failed, because
8299 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
8300 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
8301 /// `Node` buffer counts remain reserved until the orphaned node is later
8302 /// cleaned up.
8303 ///
8304 /// If the `Node` exceeds a fairly large number of attached eventpair server
8305 /// ends, a log message will indicate this and the `Node` (and the
8306 /// appropriate) sub-tree will fail.
8307 ///
8308 /// The `server_end` will remain open when
8309 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
8310 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
8311 /// [`fuchsia.sysmem2/BufferCollection`].
8312 ///
8313 /// This message can also be used with a
8314 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8315 AttachNodeTracking {
8316 payload: NodeAttachNodeTrackingRequest,
8317 control_handle: BufferCollectionTokenControlHandle,
8318 },
8319 /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
8320 /// one, referring to the same buffer collection.
8321 ///
8322 /// The created tokens are children of this token in the
8323 /// [`fuchsia.sysmem2/Node`] heirarchy.
8324 ///
8325 /// This method can be used to add more participants, by transferring the
8326 /// newly created tokens to additional participants.
8327 ///
8328 /// A new token will be returned for each entry in the
8329 /// `rights_attenuation_masks` array.
8330 ///
8331 /// If the called token may not actually be a valid token due to a
8332 /// potentially hostile/untrusted provider of the token, consider using
8333 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8334 /// instead of potentially getting stuck indefinitely if
8335 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
8336 /// due to the calling token not being a real token.
8337 ///
8338 /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
8339 /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
8340 /// method, because the sync step is included in this call, at the cost of a
8341 /// round trip during this call.
8342 ///
8343 /// All tokens must be turned in to sysmem via
8344 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8345 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8346 /// successfully allocate buffers (or to logically allocate buffers in the
8347 /// case of subtrees involving
8348 /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
8349 ///
8350 /// All table fields are currently required.
8351 ///
8352 /// + request `rights_attenuation_mask` In each entry of
8353 /// `rights_attenuation_masks`, rights bits that are zero will be absent
8354 /// in the buffer VMO rights obtainable via the corresponding returned
8355 /// token. This allows an initiator or intermediary participant to
8356 /// attenuate the rights available to a participant. This does not allow a
8357 /// participant to gain rights that the participant doesn't already have.
8358 /// The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
8359 /// attenuation should be applied.
8360 /// - response `tokens` The client ends of each newly created token.
8361 DuplicateSync {
8362 payload: BufferCollectionTokenDuplicateSyncRequest,
8363 responder: BufferCollectionTokenDuplicateSyncResponder,
8364 },
8365 /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
8366 /// one, referring to the same buffer collection.
8367 ///
8368 /// The created token is a child of this token in the
8369 /// [`fuchsia.sysmem2/Node`] heirarchy.
8370 ///
8371 /// This method can be used to add a participant, by transferring the newly
8372 /// created token to another participant.
8373 ///
8374 /// This one-way message can be used instead of the two-way
8375 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
8376 /// performance sensitive cases where it would be undesireable to wait for
8377 /// sysmem to respond to
8378 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
8379 /// client code isn't structured to make it easy to duplicate all the needed
8380 /// tokens at once.
8381 ///
8382 /// After sending one or more `Duplicate` messages, and before sending the
8383 /// newly created child tokens to other participants (or to other
8384 /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
8385 /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
8386 /// `Sync` call can be made on the token, or on the `BufferCollection`
8387 /// obtained by passing this token to `BindSharedCollection`. Either will
8388 /// ensure that the server knows about the tokens created via `Duplicate`
8389 /// before the other participant sends the token to the server via separate
8390 /// `Allocator` channel.
8391 ///
8392 /// All tokens must be turned in via
8393 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8394 /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8395 /// successfully allocate buffers.
8396 ///
8397 /// All table fields are currently required.
8398 ///
8399 /// + request `rights_attenuation_mask` The rights bits that are zero in
8400 /// this mask will be absent in the buffer VMO rights obtainable via the
8401 /// client end of `token_request`. This allows an initiator or
8402 /// intermediary participant to attenuate the rights available to a
8403 /// delegate participant. This does not allow a participant to gain rights
8404 /// that the participant doesn't already have. The value
8405 /// `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
8406 /// should be applied.
8407 /// + These values for rights_attenuation_mask result in no attenuation:
8408 /// + `ZX_RIGHT_SAME_RIGHTS` (preferred)
8409 /// + 0xFFFFFFFF (this is reasonable when an attenuation mask is
8410 /// computed)
8411 /// + 0 (deprecated - do not use 0 - an ERROR will go to the log)
8412 /// + request `token_request` is the server end of a `BufferCollectionToken`
8413 /// channel. The client end of this channel acts as another participant in
8414 /// the shared buffer collection.
8415 Duplicate {
8416 payload: BufferCollectionTokenDuplicateRequest,
8417 control_handle: BufferCollectionTokenControlHandle,
8418 },
8419 /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
8420 ///
8421 /// When the `BufferCollectionToken` is converted to a
8422 /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
8423 /// the `BufferCollection` also.
8424 ///
8425 /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
8426 /// client end without having sent
8427 /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
8428 /// `BufferCollection` [`fuchisa.sysmem2/Node`] will fail, which also
8429 /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
8430 /// to the root `Node`, which fails the whole buffer collection. In
8431 /// contrast, a dispensable `Node` can fail after buffers are allocated
8432 /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
8433 /// heirarchy.
8434 ///
8435 /// The dispensable `Node` participates in constraints aggregation along
8436 /// with its parent before buffer allocation. If the dispensable `Node`
8437 /// fails before buffers are allocated, the failure propagates to the
8438 /// dispensable `Node`'s parent.
8439 ///
8440 /// After buffers are allocated, failure of the dispensable `Node` (or any
8441 /// child of the dispensable `Node`) does not propagate to the dispensable
8442 /// `Node`'s parent. Failure does propagate from a normal child of a
8443 /// dispensable `Node` to the dispensable `Node`. Failure of a child is
8444 /// blocked from reaching its parent if the child is attached using
8445 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
8446 /// dispensable and the failure occurred after allocation.
8447 ///
8448 /// A dispensable `Node` can be used in cases where a participant needs to
8449 /// provide constraints, but after buffers are allocated, the participant
8450 /// can fail without causing buffer collection failure from the parent
8451 /// `Node`'s point of view.
8452 ///
8453 /// In contrast, `BufferCollection.AttachToken` can be used to create a
8454 /// `BufferCollectionToken` which does not participate in constraints
8455 /// aggregation with its parent `Node`, and whose failure at any time does
8456 /// not propagate to its parent `Node`, and whose potential delay providing
8457 /// constraints does not prevent the parent `Node` from completing its
8458 /// buffer allocation.
8459 ///
8460 /// An initiator (creator of the root `Node` using
8461 /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
8462 /// scenarios choose to initially use a dispensable `Node` for a first
8463 /// instance of a participant, and then later if the first instance of that
8464 /// participant fails, a new second instance of that participant my be given
8465 /// a `BufferCollectionToken` created with `AttachToken`.
8466 ///
8467 /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
8468 /// shortly before sending the dispensable `BufferCollectionToken` to a
8469 /// delegate participant. Because `SetDispensable` prevents propagation of
8470 /// child `Node` failure to parent `Node`(s), if the client was relying on
8471 /// noticing child failure via failure of the parent `Node` retained by the
8472 /// client, the client may instead need to notice failure via other means.
8473 /// If other means aren't available/convenient, the client can instead
8474 /// retain the dispensable `Node` and create a child `Node` under that to
8475 /// send to the delegate participant, retaining this `Node` in order to
8476 /// notice failure of the subtree rooted at this `Node` via this `Node`'s
8477 /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
8478 /// (e.g. starting a new instance of the delegate participant and handing it
8479 /// a `BufferCollectionToken` created using
8480 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
8481 /// and clean up in a client-specific way).
8482 ///
8483 /// While it is possible (and potentially useful) to `SetDispensable` on a
8484 /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
8485 /// to later replace a failed dispensable `Node` that was a direct child of
8486 /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
8487 /// (since there's no `AttachToken` on a group). Instead, to enable
8488 /// `AttachToken` replacement in this case, create an additional
8489 /// non-dispensable token that's a direct child of the group and make the
8490 /// existing dispensable token a child of the additional token. This way,
8491 /// the additional token that is a direct child of the group has
8492 /// `BufferCollection.AttachToken` which can be used to replace the failed
8493 /// dispensable token.
8494 ///
8495 /// `SetDispensable` on an already-dispensable token is idempotent.
8496 SetDispensable { control_handle: BufferCollectionTokenControlHandle },
8497 /// Create a logical OR among a set of tokens, called a
8498 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8499 ///
8500 /// Most sysmem clients and many participants don't need to care about this
8501 /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
8502 /// a participant wants to attempt to include one set of delegate
8503 /// participants, but if constraints don't combine successfully that way,
8504 /// fall back to a different (possibly overlapping) set of delegate
8505 /// participants, and/or fall back to a less demanding strategy (in terms of
8506 /// how strict the [`fuchisa.sysmem2/BufferCollectionConstraints`] are,
8507 /// across all involved delegate participants). In such cases, a
8508 /// `BufferCollectionTokenGroup` is useful.
8509 ///
8510 /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
8511 /// child [`fuchsia.sysmem2/BufferCollectionToken`](s). The child tokens
8512 /// which are not selected during aggregation will fail (close), which a
8513 /// potential participant should notice when their `BufferCollection`
8514 /// channel client endpoint sees PEER_CLOSED, allowing the participant to
8515 /// clean up the speculative usage that didn't end up happening (this is
8516 /// simimlar to a normal `BufferCollection` server end closing on failure to
8517 /// allocate a logical buffer collection or later async failure of a buffer
8518 /// collection).
8519 ///
8520 /// See comments on protocol `BufferCollectionTokenGroup`.
8521 ///
8522 /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
8523 /// applied to the whole group can be achieved with a
8524 /// `BufferCollectionToken` for this purpose as a direct parent of the
8525 /// `BufferCollectionTokenGroup`.
8526 ///
8527 /// All table fields are currently required.
8528 ///
8529 /// + request `group_request` The server end of a
8530 /// `BufferCollectionTokenGroup` channel to be served by sysmem.
8531 CreateBufferCollectionTokenGroup {
8532 payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8533 control_handle: BufferCollectionTokenControlHandle,
8534 },
8535 /// An interaction was received which does not match any known method.
8536 #[non_exhaustive]
8537 _UnknownMethod {
8538 /// Ordinal of the method that was called.
8539 ordinal: u64,
8540 control_handle: BufferCollectionTokenControlHandle,
8541 method_type: fidl::MethodType,
8542 },
8543}
8544
8545impl BufferCollectionTokenRequest {
8546 #[allow(irrefutable_let_patterns)]
8547 pub fn into_sync(self) -> Option<(BufferCollectionTokenSyncResponder)> {
8548 if let BufferCollectionTokenRequest::Sync { responder } = self {
8549 Some((responder))
8550 } else {
8551 None
8552 }
8553 }
8554
8555 #[allow(irrefutable_let_patterns)]
8556 pub fn into_release(self) -> Option<(BufferCollectionTokenControlHandle)> {
8557 if let BufferCollectionTokenRequest::Release { control_handle } = self {
8558 Some((control_handle))
8559 } else {
8560 None
8561 }
8562 }
8563
8564 #[allow(irrefutable_let_patterns)]
8565 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionTokenControlHandle)> {
8566 if let BufferCollectionTokenRequest::SetName { payload, control_handle } = self {
8567 Some((payload, control_handle))
8568 } else {
8569 None
8570 }
8571 }
8572
8573 #[allow(irrefutable_let_patterns)]
8574 pub fn into_set_debug_client_info(
8575 self,
8576 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenControlHandle)> {
8577 if let BufferCollectionTokenRequest::SetDebugClientInfo { payload, control_handle } = self {
8578 Some((payload, control_handle))
8579 } else {
8580 None
8581 }
8582 }
8583
8584 #[allow(irrefutable_let_patterns)]
8585 pub fn into_set_debug_timeout_log_deadline(
8586 self,
8587 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenControlHandle)> {
8588 if let BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {
8589 payload,
8590 control_handle,
8591 } = self
8592 {
8593 Some((payload, control_handle))
8594 } else {
8595 None
8596 }
8597 }
8598
8599 #[allow(irrefutable_let_patterns)]
8600 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenControlHandle)> {
8601 if let BufferCollectionTokenRequest::SetVerboseLogging { control_handle } = self {
8602 Some((control_handle))
8603 } else {
8604 None
8605 }
8606 }
8607
8608 #[allow(irrefutable_let_patterns)]
8609 pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGetNodeRefResponder)> {
8610 if let BufferCollectionTokenRequest::GetNodeRef { responder } = self {
8611 Some((responder))
8612 } else {
8613 None
8614 }
8615 }
8616
8617 #[allow(irrefutable_let_patterns)]
8618 pub fn into_is_alternate_for(
8619 self,
8620 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenIsAlternateForResponder)> {
8621 if let BufferCollectionTokenRequest::IsAlternateFor { payload, responder } = self {
8622 Some((payload, responder))
8623 } else {
8624 None
8625 }
8626 }
8627
8628 #[allow(irrefutable_let_patterns)]
8629 pub fn into_get_buffer_collection_id(
8630 self,
8631 ) -> Option<(BufferCollectionTokenGetBufferCollectionIdResponder)> {
8632 if let BufferCollectionTokenRequest::GetBufferCollectionId { responder } = self {
8633 Some((responder))
8634 } else {
8635 None
8636 }
8637 }
8638
8639 #[allow(irrefutable_let_patterns)]
8640 pub fn into_set_weak(self) -> Option<(BufferCollectionTokenControlHandle)> {
8641 if let BufferCollectionTokenRequest::SetWeak { control_handle } = self {
8642 Some((control_handle))
8643 } else {
8644 None
8645 }
8646 }
8647
8648 #[allow(irrefutable_let_patterns)]
8649 pub fn into_set_weak_ok(
8650 self,
8651 ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenControlHandle)> {
8652 if let BufferCollectionTokenRequest::SetWeakOk { payload, control_handle } = self {
8653 Some((payload, control_handle))
8654 } else {
8655 None
8656 }
8657 }
8658
8659 #[allow(irrefutable_let_patterns)]
8660 pub fn into_attach_node_tracking(
8661 self,
8662 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenControlHandle)> {
8663 if let BufferCollectionTokenRequest::AttachNodeTracking { payload, control_handle } = self {
8664 Some((payload, control_handle))
8665 } else {
8666 None
8667 }
8668 }
8669
8670 #[allow(irrefutable_let_patterns)]
8671 pub fn into_duplicate_sync(
8672 self,
8673 ) -> Option<(
8674 BufferCollectionTokenDuplicateSyncRequest,
8675 BufferCollectionTokenDuplicateSyncResponder,
8676 )> {
8677 if let BufferCollectionTokenRequest::DuplicateSync { payload, responder } = self {
8678 Some((payload, responder))
8679 } else {
8680 None
8681 }
8682 }
8683
8684 #[allow(irrefutable_let_patterns)]
8685 pub fn into_duplicate(
8686 self,
8687 ) -> Option<(BufferCollectionTokenDuplicateRequest, BufferCollectionTokenControlHandle)> {
8688 if let BufferCollectionTokenRequest::Duplicate { payload, control_handle } = self {
8689 Some((payload, control_handle))
8690 } else {
8691 None
8692 }
8693 }
8694
8695 #[allow(irrefutable_let_patterns)]
8696 pub fn into_set_dispensable(self) -> Option<(BufferCollectionTokenControlHandle)> {
8697 if let BufferCollectionTokenRequest::SetDispensable { control_handle } = self {
8698 Some((control_handle))
8699 } else {
8700 None
8701 }
8702 }
8703
8704 #[allow(irrefutable_let_patterns)]
8705 pub fn into_create_buffer_collection_token_group(
8706 self,
8707 ) -> Option<(
8708 BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8709 BufferCollectionTokenControlHandle,
8710 )> {
8711 if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
8712 payload,
8713 control_handle,
8714 } = self
8715 {
8716 Some((payload, control_handle))
8717 } else {
8718 None
8719 }
8720 }
8721
8722 /// Name of the method defined in FIDL
8723 pub fn method_name(&self) -> &'static str {
8724 match *self {
8725 BufferCollectionTokenRequest::Sync { .. } => "sync",
8726 BufferCollectionTokenRequest::Release { .. } => "release",
8727 BufferCollectionTokenRequest::SetName { .. } => "set_name",
8728 BufferCollectionTokenRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
8729 BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline { .. } => {
8730 "set_debug_timeout_log_deadline"
8731 }
8732 BufferCollectionTokenRequest::SetVerboseLogging { .. } => "set_verbose_logging",
8733 BufferCollectionTokenRequest::GetNodeRef { .. } => "get_node_ref",
8734 BufferCollectionTokenRequest::IsAlternateFor { .. } => "is_alternate_for",
8735 BufferCollectionTokenRequest::GetBufferCollectionId { .. } => {
8736 "get_buffer_collection_id"
8737 }
8738 BufferCollectionTokenRequest::SetWeak { .. } => "set_weak",
8739 BufferCollectionTokenRequest::SetWeakOk { .. } => "set_weak_ok",
8740 BufferCollectionTokenRequest::AttachNodeTracking { .. } => "attach_node_tracking",
8741 BufferCollectionTokenRequest::DuplicateSync { .. } => "duplicate_sync",
8742 BufferCollectionTokenRequest::Duplicate { .. } => "duplicate",
8743 BufferCollectionTokenRequest::SetDispensable { .. } => "set_dispensable",
8744 BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup { .. } => {
8745 "create_buffer_collection_token_group"
8746 }
8747 BufferCollectionTokenRequest::_UnknownMethod {
8748 method_type: fidl::MethodType::OneWay,
8749 ..
8750 } => "unknown one-way method",
8751 BufferCollectionTokenRequest::_UnknownMethod {
8752 method_type: fidl::MethodType::TwoWay,
8753 ..
8754 } => "unknown two-way method",
8755 }
8756 }
8757}
8758
/// Server-side handle for the `BufferCollectionToken` channel; supports
/// shutdown and peer signaling independently of any individual responder.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenControlHandle {
    // Shared connection state; cloning this handle clones the `Arc`, not the channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
8763
impl fidl::endpoints::ControlHandle for BufferCollectionTokenControlHandle {
    // All operations delegate to the shared `ServeInner` connection state.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raises/clears Zircon signals on the peer end of the channel; only
    // available when compiling for Fuchsia.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
8789
// Intentionally empty: no protocol-specific methods are generated for this handle.
impl BufferCollectionTokenControlHandle {}
8791
/// Responder for the `Sync` request; send the reply via [`BufferCollectionTokenSyncResponder::send`].
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenSyncResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}
8798
/// Sets the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8809
impl fidl::endpoints::Responder for BufferCollectionTokenSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    // Consumes the responder without replying and without the Drop-time
    // channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8824
8825impl BufferCollectionTokenSyncResponder {
8826 /// Sends a response to the FIDL transaction.
8827 ///
8828 /// Sets the channel to shutdown if an error occurs.
8829 pub fn send(self) -> Result<(), fidl::Error> {
8830 let _result = self.send_raw();
8831 if _result.is_err() {
8832 self.control_handle.shutdown();
8833 }
8834 self.drop_without_shutdown();
8835 _result
8836 }
8837
8838 /// Similar to "send" but does not shutdown the channel if an error occurs.
8839 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
8840 let _result = self.send_raw();
8841 self.drop_without_shutdown();
8842 _result
8843 }
8844
8845 fn send_raw(&self) -> Result<(), fidl::Error> {
8846 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
8847 fidl::encoding::Flexible::new(()),
8848 self.tx_id,
8849 0x11ac2555cf575b54,
8850 fidl::encoding::DynamicFlags::FLEXIBLE,
8851 )
8852 }
8853}
8854
/// Responder for the `GetNodeRef` request; send the reply via
/// [`BufferCollectionTokenGetNodeRefResponder::send`].
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetNodeRefResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}
8861
/// Sets the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8872
impl fidl::endpoints::Responder for BufferCollectionTokenGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    // Consumes the responder without replying and without the Drop-time
    // channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8887
8888impl BufferCollectionTokenGetNodeRefResponder {
8889 /// Sends a response to the FIDL transaction.
8890 ///
8891 /// Sets the channel to shutdown if an error occurs.
8892 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8893 let _result = self.send_raw(payload);
8894 if _result.is_err() {
8895 self.control_handle.shutdown();
8896 }
8897 self.drop_without_shutdown();
8898 _result
8899 }
8900
8901 /// Similar to "send" but does not shutdown the channel if an error occurs.
8902 pub fn send_no_shutdown_on_err(
8903 self,
8904 mut payload: NodeGetNodeRefResponse,
8905 ) -> Result<(), fidl::Error> {
8906 let _result = self.send_raw(payload);
8907 self.drop_without_shutdown();
8908 _result
8909 }
8910
8911 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8912 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
8913 fidl::encoding::Flexible::new(&mut payload),
8914 self.tx_id,
8915 0x5b3d0e51614df053,
8916 fidl::encoding::DynamicFlags::FLEXIBLE,
8917 )
8918 }
8919}
8920
/// Responder for the `IsAlternateFor` request; send the reply via
/// [`BufferCollectionTokenIsAlternateForResponder::send`].
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenIsAlternateForResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}
8927
/// Sets the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
8938
impl fidl::endpoints::Responder for BufferCollectionTokenIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    // Consumes the responder without replying and without the Drop-time
    // channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8953
8954impl BufferCollectionTokenIsAlternateForResponder {
8955 /// Sends a response to the FIDL transaction.
8956 ///
8957 /// Sets the channel to shutdown if an error occurs.
8958 pub fn send(
8959 self,
8960 mut result: Result<&NodeIsAlternateForResponse, Error>,
8961 ) -> Result<(), fidl::Error> {
8962 let _result = self.send_raw(result);
8963 if _result.is_err() {
8964 self.control_handle.shutdown();
8965 }
8966 self.drop_without_shutdown();
8967 _result
8968 }
8969
8970 /// Similar to "send" but does not shutdown the channel if an error occurs.
8971 pub fn send_no_shutdown_on_err(
8972 self,
8973 mut result: Result<&NodeIsAlternateForResponse, Error>,
8974 ) -> Result<(), fidl::Error> {
8975 let _result = self.send_raw(result);
8976 self.drop_without_shutdown();
8977 _result
8978 }
8979
8980 fn send_raw(
8981 &self,
8982 mut result: Result<&NodeIsAlternateForResponse, Error>,
8983 ) -> Result<(), fidl::Error> {
8984 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
8985 NodeIsAlternateForResponse,
8986 Error,
8987 >>(
8988 fidl::encoding::FlexibleResult::new(result),
8989 self.tx_id,
8990 0x3a58e00157e0825,
8991 fidl::encoding::DynamicFlags::FLEXIBLE,
8992 )
8993 }
8994}
8995
/// Responder for the `GetBufferCollectionId` request; send the reply via
/// [`BufferCollectionTokenGetBufferCollectionIdResponder::send`].
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}
9002
/// Sets the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9013
impl fidl::endpoints::Responder for BufferCollectionTokenGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    // Consumes the responder without replying and without the Drop-time
    // channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9028
9029impl BufferCollectionTokenGetBufferCollectionIdResponder {
9030 /// Sends a response to the FIDL transaction.
9031 ///
9032 /// Sets the channel to shutdown if an error occurs.
9033 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9034 let _result = self.send_raw(payload);
9035 if _result.is_err() {
9036 self.control_handle.shutdown();
9037 }
9038 self.drop_without_shutdown();
9039 _result
9040 }
9041
9042 /// Similar to "send" but does not shutdown the channel if an error occurs.
9043 pub fn send_no_shutdown_on_err(
9044 self,
9045 mut payload: &NodeGetBufferCollectionIdResponse,
9046 ) -> Result<(), fidl::Error> {
9047 let _result = self.send_raw(payload);
9048 self.drop_without_shutdown();
9049 _result
9050 }
9051
9052 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9053 self.control_handle
9054 .inner
9055 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
9056 fidl::encoding::Flexible::new(payload),
9057 self.tx_id,
9058 0x77d19a494b78ba8c,
9059 fidl::encoding::DynamicFlags::FLEXIBLE,
9060 )
9061 }
9062}
9063
/// Responder for the `DuplicateSync` request; send the reply via
/// [`BufferCollectionTokenDuplicateSyncResponder::send`].
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenDuplicateSyncResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}
9070
/// Sets the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenDuplicateSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9081
impl fidl::endpoints::Responder for BufferCollectionTokenDuplicateSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    // Consumes the responder without replying and without the Drop-time
    // channel shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9096
9097impl BufferCollectionTokenDuplicateSyncResponder {
9098 /// Sends a response to the FIDL transaction.
9099 ///
9100 /// Sets the channel to shutdown if an error occurs.
9101 pub fn send(
9102 self,
9103 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9104 ) -> Result<(), fidl::Error> {
9105 let _result = self.send_raw(payload);
9106 if _result.is_err() {
9107 self.control_handle.shutdown();
9108 }
9109 self.drop_without_shutdown();
9110 _result
9111 }
9112
9113 /// Similar to "send" but does not shutdown the channel if an error occurs.
9114 pub fn send_no_shutdown_on_err(
9115 self,
9116 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9117 ) -> Result<(), fidl::Error> {
9118 let _result = self.send_raw(payload);
9119 self.drop_without_shutdown();
9120 _result
9121 }
9122
9123 fn send_raw(
9124 &self,
9125 mut payload: BufferCollectionTokenDuplicateSyncResponse,
9126 ) -> Result<(), fidl::Error> {
9127 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
9128 BufferCollectionTokenDuplicateSyncResponse,
9129 >>(
9130 fidl::encoding::Flexible::new(&mut payload),
9131 self.tx_id,
9132 0x1c1af9919d1ca45c,
9133 fidl::encoding::DynamicFlags::FLEXIBLE,
9134 )
9135 }
9136}
9137
/// Marker type identifying the `BufferCollectionTokenGroup` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenGroupMarker;
9140
/// Ties together the proxy, request-stream, and synchronous-proxy types for
/// the `BufferCollectionTokenGroup` protocol.
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenGroupMarker {
    type Proxy = BufferCollectionTokenGroupProxy;
    type RequestStream = BufferCollectionTokenGroupRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenGroupSynchronousProxy;

    // NOTE(review): "(anonymous)" presumably indicates the protocol has no
    // discoverable name — confirm against fidlgen conventions.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionTokenGroup";
}
9149
/// Client-side interface for the `BufferCollectionTokenGroup` protocol,
/// implemented by both the async and synchronous proxies. One-way methods
/// return a `Result` immediately; two-way methods return a future type
/// declared as an associated type.
pub trait BufferCollectionTokenGroupProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#create_child(
        &self,
        payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error>;
    type CreateChildrenSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error>,
        > + Send;
    fn r#create_children_sync(
        &self,
        payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut;
    fn r#all_children_present(&self) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client for `BufferCollectionTokenGroup`; only
/// compiled for Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenGroupSynchronousProxy {
    // Synchronous FIDL client wrapping the underlying channel.
    client: fidl::client::sync::Client,
}
9200
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenGroupSynchronousProxy {
    type Proxy = BufferCollectionTokenGroupProxy;
    type Protocol = BufferCollectionTokenGroupMarker;

    // Conversions between the proxy and its raw channel all delegate to the
    // inner sync client.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
9218
9219#[cfg(target_os = "fuchsia")]
9220impl BufferCollectionTokenGroupSynchronousProxy {
9221 pub fn new(channel: fidl::Channel) -> Self {
9222 let protocol_name =
9223 <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9224 Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
9225 }
9226
9227 pub fn into_channel(self) -> fidl::Channel {
9228 self.client.into_channel()
9229 }
9230
9231 /// Waits until an event arrives and returns it. It is safe for other
9232 /// threads to make concurrent requests while waiting for an event.
9233 pub fn wait_for_event(
9234 &self,
9235 deadline: zx::MonotonicInstant,
9236 ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
9237 BufferCollectionTokenGroupEvent::decode(self.client.wait_for_event(deadline)?)
9238 }
9239
9240 /// Ensure that previous messages have been received server side. This is
9241 /// particularly useful after previous messages that created new tokens,
9242 /// because a token must be known to the sysmem server before sending the
9243 /// token to another participant.
9244 ///
9245 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9246 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9247 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9248 /// to mitigate the possibility of a hostile/fake
9249 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9250 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
9252 /// the token as part of exchanging it for a
9253 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9254 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9255 /// of stalling.
9256 ///
9257 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9258 /// and then starting and completing a `Sync`, it's then safe to send the
9259 /// `BufferCollectionToken` client ends to other participants knowing the
9260 /// server will recognize the tokens when they're sent by the other
9261 /// participants to sysmem in a
9262 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9263 /// efficient way to create tokens while avoiding unnecessary round trips.
9264 ///
9265 /// Other options include waiting for each
9266 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9267 /// individually (using separate call to `Sync` after each), or calling
9268 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9269 /// converted to a `BufferCollection` via
9270 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9271 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9272 /// the sync step and can create multiple tokens at once.
9273 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
9274 let _response = self.client.send_query::<
9275 fidl::encoding::EmptyPayload,
9276 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
9277 >(
9278 (),
9279 0x11ac2555cf575b54,
9280 fidl::encoding::DynamicFlags::FLEXIBLE,
9281 ___deadline,
9282 )?
9283 .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
9284 Ok(_response)
9285 }
9286
9287 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9288 ///
9289 /// Normally a participant will convert a `BufferCollectionToken` into a
9290 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9291 /// `Release` via the token (and then close the channel immediately or
9292 /// shortly later in response to server closing the server end), which
9293 /// avoids causing buffer collection failure. Without a prior `Release`,
9294 /// closing the `BufferCollectionToken` client end will cause buffer
9295 /// collection failure.
9296 ///
9297 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9298 ///
9299 /// By default the server handles unexpected closure of a
9300 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9301 /// first) by failing the buffer collection. Partly this is to expedite
9302 /// closing VMO handles to reclaim memory when any participant fails. If a
9303 /// participant would like to cleanly close a `BufferCollection` without
9304 /// causing buffer collection failure, the participant can send `Release`
9305 /// before closing the `BufferCollection` client end. The `Release` can
9306 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9307 /// buffer collection won't require constraints from this node in order to
9308 /// allocate. If after `SetConstraints`, the constraints are retained and
9309 /// aggregated, despite the lack of `BufferCollection` connection at the
9310 /// time of constraints aggregation.
9311 ///
9312 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9313 ///
9314 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9315 /// end (without `Release` first) will trigger failure of the buffer
9316 /// collection. To close a `BufferCollectionTokenGroup` channel without
9317 /// failing the buffer collection, ensure that AllChildrenPresent() has been
9318 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9319 /// client end.
9320 ///
9321 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
9323 /// buffer collection will fail (triggered by reception of `Release` without
9324 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9325 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9326 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9327 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9328 /// close requires `AllChildrenPresent` (if not already sent), then
9329 /// `Release`, then close client end.
9330 ///
9331 /// If `Release` occurs after `AllChildrenPresent`, the children and all
9332 /// their constraints remain intact (just as they would if the
9333 /// `BufferCollectionTokenGroup` channel had remained open), and the client
9334 /// end close doesn't trigger buffer collection failure.
9335 ///
9336 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9337 ///
9338 /// For brevity, the per-channel-protocol paragraphs above ignore the
9339 /// separate failure domain created by
9340 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9341 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9342 /// unexpectedly closes (without `Release` first) and that client end is
9343 /// under a failure domain, instead of failing the whole buffer collection,
9344 /// the failure domain is failed, but the buffer collection itself is
9345 /// isolated from failure of the failure domain. Such failure domains can be
9346 /// nested, in which case only the inner-most failure domain in which the
9347 /// `Node` resides fails.
9348 pub fn r#release(&self) -> Result<(), fidl::Error> {
9349 self.client.send::<fidl::encoding::EmptyPayload>(
9350 (),
9351 0x6a5cae7d6d6e04c6,
9352 fidl::encoding::DynamicFlags::FLEXIBLE,
9353 )
9354 }
9355
9356 /// Set a name for VMOs in this buffer collection.
9357 ///
9358 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
9359 /// will be truncated to fit. The name of the vmo will be suffixed with the
9360 /// buffer index within the collection (if the suffix fits within
9361 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9362 /// listed in the inspect data.
9363 ///
9364 /// The name only affects VMOs allocated after the name is set; this call
9365 /// does not rename existing VMOs. If multiple clients set different names
9366 /// then the larger priority value will win. Setting a new name with the
9367 /// same priority as a prior name doesn't change the name.
9368 ///
9369 /// All table fields are currently required.
9370 ///
9371 /// + request `priority` The name is only set if this is the first `SetName`
9372 /// or if `priority` is greater than any previous `priority` value in
9373 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
9374 /// + request `name` The name for VMOs created under this buffer collection.
9375 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9376 self.client.send::<NodeSetNameRequest>(
9377 payload,
9378 0xb41f1624f48c1e9,
9379 fidl::encoding::DynamicFlags::FLEXIBLE,
9380 )
9381 }
9382
9383 /// Set information about the current client that can be used by sysmem to
9384 /// help diagnose leaking memory and allocation stalls waiting for a
9385 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9386 ///
9387 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
9389 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9390 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9391 ///
9392 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9393 /// `Allocator` is the most efficient way to ensure that all
9394 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9395 /// set, and is also more efficient than separately sending the same debug
9396 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9397 /// created [`fuchsia.sysmem2/Node`].
9398 ///
9399 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9400 /// indicate which client is closing their channel first, leading to subtree
9401 /// failure (which can be normal if the purpose of the subtree is over, but
9402 /// if happening earlier than expected, the client-channel-specific name can
9403 /// help diagnose where the failure is first coming from, from sysmem's
9404 /// point of view).
9405 ///
9406 /// All table fields are currently required.
9407 ///
9408 /// + request `name` This can be an arbitrary string, but the current
9409 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
9410 /// + request `id` This can be an arbitrary id, but the current process ID
9411 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
9412 pub fn r#set_debug_client_info(
9413 &self,
9414 mut payload: &NodeSetDebugClientInfoRequest,
9415 ) -> Result<(), fidl::Error> {
9416 self.client.send::<NodeSetDebugClientInfoRequest>(
9417 payload,
9418 0x5cde8914608d99b1,
9419 fidl::encoding::DynamicFlags::FLEXIBLE,
9420 )
9421 }
9422
9423 /// Sysmem logs a warning if sysmem hasn't seen
9424 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
9425 /// within 5 seconds after creation of a new collection.
9426 ///
9427 /// Clients can call this method to change when the log is printed. If
9428 /// multiple client set the deadline, it's unspecified which deadline will
9429 /// take effect.
9430 ///
9431 /// In most cases the default works well.
9432 ///
9433 /// All table fields are currently required.
9434 ///
9435 /// + request `deadline` The time at which sysmem will start trying to log
9436 /// the warning, unless all constraints are with sysmem by then.
9437 pub fn r#set_debug_timeout_log_deadline(
9438 &self,
9439 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9440 ) -> Result<(), fidl::Error> {
9441 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
9442 payload,
9443 0x716b0af13d5c0806,
9444 fidl::encoding::DynamicFlags::FLEXIBLE,
9445 )
9446 }
9447
9448 /// This enables verbose logging for the buffer collection.
9449 ///
9450 /// Verbose logging includes constraints set via
9451 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
9452 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
9453 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
9454 /// the tree of `Node`(s).
9455 ///
9456 /// Normally sysmem prints only a single line complaint when aggregation
9457 /// fails, with just the specific detailed reason that aggregation failed,
9458 /// with little surrounding context. While this is often enough to diagnose
9459 /// a problem if only a small change was made and everything was working
9460 /// before the small change, it's often not particularly helpful for getting
9461 /// a new buffer collection to work for the first time. Especially with
9462 /// more complex trees of nodes, involving things like
9463 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
9464 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
9465 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
9466 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
9467 /// looks like and why it's failing a logical allocation, or why a tree or
9468 /// subtree is failing sooner than expected.
9469 ///
9470 /// The intent of the extra logging is to be acceptable from a performance
9471 /// point of view, under the assumption that verbose logging is only enabled
9472 /// on a low number of buffer collections. If we're not tracking down a bug,
9473 /// we shouldn't send this message.
9474 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
9475 self.client.send::<fidl::encoding::EmptyPayload>(
9476 (),
9477 0x5209c77415b4dfad,
9478 fidl::encoding::DynamicFlags::FLEXIBLE,
9479 )
9480 }
9481
9482 /// This gets a handle that can be used as a parameter to
9483 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
9484 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
9485 /// client obtained this handle from this `Node`.
9486 ///
9487 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
9488 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
9489 /// despite the two calls typically being on different channels.
9490 ///
9491 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
9492 ///
9493 /// All table fields are currently required.
9494 ///
9495 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
9496 /// different `Node` channel, to prove that the client obtained the handle
9497 /// from this `Node`.
9498 pub fn r#get_node_ref(
9499 &self,
9500 ___deadline: zx::MonotonicInstant,
9501 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
9502 let _response = self.client.send_query::<
9503 fidl::encoding::EmptyPayload,
9504 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
9505 >(
9506 (),
9507 0x5b3d0e51614df053,
9508 fidl::encoding::DynamicFlags::FLEXIBLE,
9509 ___deadline,
9510 )?
9511 .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
9512 Ok(_response)
9513 }
9514
9515 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
9516 /// rooted at a different child token of a common parent
9517 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
9518 /// passed-in `node_ref`.
9519 ///
9520 /// This call is for assisting with admission control de-duplication, and
9521 /// with debugging.
9522 ///
9523 /// The `node_ref` must be obtained using
9524 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
9525 ///
9526 /// The `node_ref` can be a duplicated handle; it's not necessary to call
9527 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
9528 ///
9529 /// If a calling token may not actually be a valid token at all due to a
9530 /// potentially hostile/untrusted provider of the token, call
9531 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
9532 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
9533 /// never responds due to a calling token not being a real token (not really
9534 /// talking to sysmem). Another option is to call
9535 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
9536 /// which also validates the token along with converting it to a
9537 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
9538 ///
9539 /// All table fields are currently required.
9540 ///
9541 /// - response `is_alternate`
9542 /// - true: The first parent node in common between the calling node and
9543 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
9544 /// that the calling `Node` and the `node_ref` `Node` will not have both
9545 /// their constraints apply - rather sysmem will choose one or the other
9546 /// of the constraints - never both. This is because only one child of
9547 /// a `BufferCollectionTokenGroup` is selected during logical
9548 /// allocation, with only that one child's subtree contributing to
9549 /// constraints aggregation.
9550 /// - false: The first parent node in common between the calling `Node`
9551 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
9552 /// Currently, this means the first parent node in common is a
9553 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
9554 /// `Release`ed). This means that the calling `Node` and the `node_ref`
9555 /// `Node` may have both their constraints apply during constraints
9556 /// aggregation of the logical allocation, if both `Node`(s) are
9557 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
9558 /// this case, there is no `BufferCollectionTokenGroup` that will
9559 /// directly prevent the two `Node`(s) from both being selected and
9560 /// their constraints both aggregated, but even when false, one or both
9561 /// `Node`(s) may still be eliminated from consideration if one or both
9562 /// `Node`(s) has a direct or indirect parent
9563 /// `BufferCollectionTokenGroup` which selects a child subtree other
9564 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
9565 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
9566 /// associated with the same buffer collection as the calling `Node`.
9567 /// Another reason for this error is if the `node_ref` is an
9568 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
9569 /// a real `node_ref` obtained from `GetNodeRef`.
9570 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
9571 /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
9572 /// the needed rights expected on a real `node_ref`.
9573 /// * No other failing status codes are returned by this call. However,
9574 /// sysmem may add additional codes in future, so the client should have
9575 /// sensible default handling for any failing status code.
9576 pub fn r#is_alternate_for(
9577 &self,
9578 mut payload: NodeIsAlternateForRequest,
9579 ___deadline: zx::MonotonicInstant,
9580 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
9581 let _response = self.client.send_query::<
9582 NodeIsAlternateForRequest,
9583 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
9584 >(
9585 &mut payload,
9586 0x3a58e00157e0825,
9587 fidl::encoding::DynamicFlags::FLEXIBLE,
9588 ___deadline,
9589 )?
9590 .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
9591 Ok(_response.map(|x| x))
9592 }
9593
9594 /// Get the buffer collection ID. This ID is also available from
9595 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
9596 /// within the collection).
9597 ///
9598 /// This call is mainly useful in situations where we can't convey a
9599 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
9600 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
9601 /// handle, which can be joined back up with a `BufferCollection` client end
9602 /// that was created via a different path. Prefer to convey a
9603 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
9604 ///
9605 /// Trusting a `buffer_collection_id` value from a source other than sysmem
9606 /// is analogous to trusting a koid value from a source other than zircon.
9607 /// Both should be avoided unless really necessary, and both require
9608 /// caution. In some situations it may be reasonable to refer to a
9609 /// pre-established `BufferCollection` by `buffer_collection_id` via a
9610 /// protocol for efficiency reasons, but an incoming value purporting to be
9611 /// a `buffer_collection_id` is not sufficient alone to justify granting the
9612 /// sender of the `buffer_collection_id` any capability. The sender must
9613 /// first prove to a receiver that the sender has/had a VMO or has/had a
9614 /// `BufferCollectionToken` to the same collection by sending a handle that
9615 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
9616 /// `buffer_collection_id` value. The receiver should take care to avoid
9617 /// assuming that a sender had a `BufferCollectionToken` in cases where the
9618 /// sender has only proven that the sender had a VMO.
9619 ///
9620 /// - response `buffer_collection_id` This ID is unique per buffer
9621 /// collection per boot. Each buffer is uniquely identified by the
9622 /// `buffer_collection_id` and `buffer_index` together.
9623 pub fn r#get_buffer_collection_id(
9624 &self,
9625 ___deadline: zx::MonotonicInstant,
9626 ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
9627 let _response = self.client.send_query::<
9628 fidl::encoding::EmptyPayload,
9629 fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
9630 >(
9631 (),
9632 0x77d19a494b78ba8c,
9633 fidl::encoding::DynamicFlags::FLEXIBLE,
9634 ___deadline,
9635 )?
9636 .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
9637 Ok(_response)
9638 }
9639
9640 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
9641 /// created after this message to weak, which means that a client's `Node`
9642 /// client end (or a child created after this message) is not alone
9643 /// sufficient to keep allocated VMOs alive.
9644 ///
9645 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
9646 /// `close_weak_asap`.
9647 ///
9648 /// This message is only permitted before the `Node` becomes ready for
9649 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
9650 /// * `BufferCollectionToken`: any time
9651 /// * `BufferCollection`: before `SetConstraints`
9652 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
9653 ///
9654 /// Currently, no conversion from strong `Node` to weak `Node` after ready
9655 /// for allocation is provided, but a client can simulate that by creating
9656 /// an additional `Node` before allocation and setting that additional
9657 /// `Node` to weak, and then potentially at some point later sending
9658 /// `Release` and closing the client end of the client's strong `Node`, but
9659 /// keeping the client's weak `Node`.
9660 ///
9661 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
9662 /// collection failure (all `Node` client end(s) will see
9663 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
9664 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
9665 /// this situation until all `Node`(s) are ready for allocation. For initial
9666 /// allocation to succeed, at least one strong `Node` is required to exist
9667 /// at allocation time, but after that client receives VMO handles, that
9668 /// client can `BufferCollection.Release` and close the client end without
9669 /// causing this type of failure.
9670 ///
9671 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
9672 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
9673 /// separately as appropriate.
9674 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
9675 self.client.send::<fidl::encoding::EmptyPayload>(
9676 (),
9677 0x22dd3ea514eeffe1,
9678 fidl::encoding::DynamicFlags::FLEXIBLE,
9679 )
9680 }
9681
9682 /// This indicates to sysmem that the client is prepared to pay attention to
9683 /// `close_weak_asap`.
9684 ///
9685 /// If sent, this message must be before
9686 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
9687 ///
9688 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
9689 /// send this message before `WaitForAllBuffersAllocated`, or a parent
9690 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
9691 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
9692 /// trigger buffer collection failure.
9693 ///
9694 /// This message is necessary because weak sysmem VMOs have not always been
9695 /// a thing, so older clients are not aware of the need to pay attention to
9696 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
9697 /// sysmem weak VMO handles asap. By having this message and requiring
9698 /// participants to indicate their acceptance of this aspect of the overall
9699 /// protocol, we avoid situations where an older client is delivered a weak
9700 /// VMO without any way for sysmem to get that VMO to close quickly later
9701 /// (and on a per-buffer basis).
9702 ///
9703 /// A participant that doesn't handle `close_weak_asap` and also doesn't
9704 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
9705 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
9706 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
9707 /// same participant has a child/delegate which does retrieve VMOs, that
9708 /// child/delegate will need to send `SetWeakOk` before
9709 /// `WaitForAllBuffersAllocated`.
9710 ///
9711 /// + request `for_child_nodes_also` If present and true, this means direct
9712 /// child nodes of this node created after this message plus all
9713 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
9714 /// those nodes. Any child node of this node that was created before this
9715 /// message is not included. This setting is "sticky" in the sense that a
9716 /// subsequent `SetWeakOk` without this bool set to true does not reset
9717 /// the server-side bool. If this creates a problem for a participant, a
9718 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
9719 /// tokens instead, as appropriate. A participant should only set
9720 /// `for_child_nodes_also` true if the participant can really promise to
9721 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
9722 /// weak VMO handles held by participants holding the corresponding child
9723 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
9724 /// which are using sysmem(1) can be weak, despite the clients of those
9725 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
9726 /// direct way to find out about `close_weak_asap`. This only applies to
9727 /// descendents of this `Node` which are using sysmem(1), not to this
9728 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
9729 /// token, which will fail allocation unless an ancestor of this `Node`
9730 /// specified `for_child_nodes_also` true.
9731 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
9732 self.client.send::<NodeSetWeakOkRequest>(
9733 &mut payload,
9734 0x38a44fc4d7724be9,
9735 fidl::encoding::DynamicFlags::FLEXIBLE,
9736 )
9737 }
9738
9739 /// The server_end will be closed after this `Node` and any child nodes have
9740 /// have released their buffer counts, making those counts available for
9741 /// reservation by a different `Node` via
9742 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
9743 ///
9744 /// The `Node` buffer counts may not be released until the entire tree of
9745 /// `Node`(s) is closed or failed, because
9746 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
9747 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
9748 /// `Node` buffer counts remain reserved until the orphaned node is later
9749 /// cleaned up.
9750 ///
9751 /// If the `Node` exceeds a fairly large number of attached eventpair server
9752 /// ends, a log message will indicate this and the `Node` (and the
9753 /// appropriate) sub-tree will fail.
9754 ///
9755 /// The `server_end` will remain open when
9756 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
9757 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
9758 /// [`fuchsia.sysmem2/BufferCollection`].
9759 ///
9760 /// This message can also be used with a
9761 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
9762 pub fn r#attach_node_tracking(
9763 &self,
9764 mut payload: NodeAttachNodeTrackingRequest,
9765 ) -> Result<(), fidl::Error> {
9766 self.client.send::<NodeAttachNodeTrackingRequest>(
9767 &mut payload,
9768 0x3f22f2a293d3cdac,
9769 fidl::encoding::DynamicFlags::FLEXIBLE,
9770 )
9771 }
9772
9773 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
9774 /// (including its children) will be selected during allocation (or logical
9775 /// allocation).
9776 ///
9777 /// Before passing the client end of this token to
9778 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
9779 /// [`fuchsia.sysmem2/Node.Sync`] after
9780 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
9781 /// Or the client can use
9782 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
9783 /// essentially includes the `Sync`.
9784 ///
9785 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9786 /// fail the group's subtree and close the connection.
9787 ///
9788 /// After all children have been created, send AllChildrenPresent.
9789 ///
9790 /// + request `token_request` The server end of the new token channel.
9791 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
9792 /// token allows the holder to get the same rights to buffers as the
9793 /// parent token (of the group) had. When the value isn't
9794 /// ZX_RIGHT_SAME_RIGHTS, the value is interpretted as a bitmask with 0
9795 /// bits ensuring those rights are attentuated, so 0xFFFFFFFF is a synonym
9796 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
9797 /// causes subtree failure.
9798 pub fn r#create_child(
9799 &self,
9800 mut payload: BufferCollectionTokenGroupCreateChildRequest,
9801 ) -> Result<(), fidl::Error> {
9802 self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
9803 &mut payload,
9804 0x41a0075d419f30c5,
9805 fidl::encoding::DynamicFlags::FLEXIBLE,
9806 )
9807 }
9808
9809 /// Create 1 or more child tokens at once, synchronously. In contrast to
9810 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
9811 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
9812 /// of a returned token to
9813 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
9814 ///
9815 /// The lower-index child tokens are higher priority (attempted sooner) than
9816 /// higher-index child tokens.
9817 ///
9818 /// As per all child tokens, successful aggregation will choose exactly one
9819 /// child among all created children (across all children created across
9820 /// potentially multiple calls to
9821 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
9822 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
9823 ///
9824 /// The maximum permissible total number of children per group, and total
9825 /// number of nodes in an overall tree (from the root) are capped to limits
9826 /// which are not configurable via these protocols.
9827 ///
9828 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
9829 /// this will fail the group's subtree and close the connection.
9830 ///
9831 /// After all children have been created, send AllChildrenPresent.
9832 ///
9833 /// + request `rights_attentuation_masks` The size of the
9834 /// `rights_attentuation_masks` determines the number of created child
9835 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
9836 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
9837 /// other value, each 0 bit in the mask attenuates that right.
9838 /// - response `tokens` The created child tokens.
9839 pub fn r#create_children_sync(
9840 &self,
9841 mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9842 ___deadline: zx::MonotonicInstant,
9843 ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
9844 let _response = self.client.send_query::<
9845 BufferCollectionTokenGroupCreateChildrenSyncRequest,
9846 fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
9847 >(
9848 payload,
9849 0x15dea448c536070a,
9850 fidl::encoding::DynamicFlags::FLEXIBLE,
9851 ___deadline,
9852 )?
9853 .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
9854 Ok(_response)
9855 }
9856
9857 /// Indicate that no more children will be created.
9858 ///
9859 /// After creating all children, the client should send
9860 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
9861 /// inform sysmem that no more children will be created, so that sysmem can
9862 /// know when it's ok to start aggregating constraints.
9863 ///
9864 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9865 /// fail the group's subtree and close the connection.
9866 ///
9867 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
9868 /// after `AllChildrenPresent`, else failure of the group's subtree will be
9869 /// triggered. This is intentionally not analogous to how `Release` without
9870 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
9871 /// subtree failure.
9872 pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
9873 self.client.send::<fidl::encoding::EmptyPayload>(
9874 (),
9875 0x5c327e4a23391312,
9876 fidl::encoding::DynamicFlags::FLEXIBLE,
9877 )
9878 }
9879}
9880
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenGroupSynchronousProxy> for zx::Handle {
    /// Consumes the proxy, yielding its transport channel as a generic handle.
    fn from(value: BufferCollectionTokenGroupSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
9887
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenGroupSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for this protocol.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenGroupSynchronousProxy::new(value)
    }
}
9894
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for BufferCollectionTokenGroupSynchronousProxy {
    type Protocol = BufferCollectionTokenGroupMarker;

    /// Builds a synchronous proxy from a typed client end by unwrapping its
    /// underlying channel.
    fn from_client(value: fidl::endpoints::ClientEnd<BufferCollectionTokenGroupMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
9903
/// Asynchronous client proxy for the `BufferCollectionTokenGroup` protocol,
/// backed by a FIDL client using the default Fuchsia resource dialect.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupProxy {
    // Underlying FIDL client; cloning the proxy shares this client.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
9908
9909impl fidl::endpoints::Proxy for BufferCollectionTokenGroupProxy {
9910 type Protocol = BufferCollectionTokenGroupMarker;
9911
9912 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
9913 Self::new(inner)
9914 }
9915
9916 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
9917 self.client.into_channel().map_err(|client| Self { client })
9918 }
9919
9920 fn as_channel(&self) -> &::fidl::AsyncChannel {
9921 self.client.as_channel()
9922 }
9923}
9924
9925impl BufferCollectionTokenGroupProxy {
9926 /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionTokenGroup.
9927 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
9928 let protocol_name =
9929 <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9930 Self { client: fidl::client::Client::new(channel, protocol_name) }
9931 }
9932
9933 /// Get a Stream of events from the remote end of the protocol.
9934 ///
9935 /// # Panics
9936 ///
9937 /// Panics if the event stream was already taken.
9938 pub fn take_event_stream(&self) -> BufferCollectionTokenGroupEventStream {
9939 BufferCollectionTokenGroupEventStream { event_receiver: self.client.take_event_receiver() }
9940 }
9941
9942 /// Ensure that previous messages have been received server side. This is
9943 /// particularly useful after previous messages that created new tokens,
9944 /// because a token must be known to the sysmem server before sending the
9945 /// token to another participant.
9946 ///
9947 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9948 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9949 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9950 /// to mitigate the possibility of a hostile/fake
9951 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9952 /// Another way is to pass the token to
9953 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9954 /// the token as part of exchanging it for a
9955 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9956 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9957 /// of stalling.
9958 ///
9959 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9960 /// and then starting and completing a `Sync`, it's then safe to send the
9961 /// `BufferCollectionToken` client ends to other participants knowing the
9962 /// server will recognize the tokens when they're sent by the other
9963 /// participants to sysmem in a
9964 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9965 /// efficient way to create tokens while avoiding unnecessary round trips.
9966 ///
9967 /// Other options include waiting for each
9968 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9969 /// individually (using separate call to `Sync` after each), or calling
9970 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9971 /// converted to a `BufferCollection` via
9972 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9973 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9974 /// the sync step and can create multiple tokens at once.
9975 pub fn r#sync(
9976 &self,
9977 ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
9978 BufferCollectionTokenGroupProxyInterface::r#sync(self)
9979 }
9980
9981 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9982 ///
9983 /// Normally a participant will convert a `BufferCollectionToken` into a
9984 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9985 /// `Release` via the token (and then close the channel immediately or
9986 /// shortly later in response to server closing the server end), which
9987 /// avoids causing buffer collection failure. Without a prior `Release`,
9988 /// closing the `BufferCollectionToken` client end will cause buffer
9989 /// collection failure.
9990 ///
9991 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9992 ///
9993 /// By default the server handles unexpected closure of a
9994 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9995 /// first) by failing the buffer collection. Partly this is to expedite
9996 /// closing VMO handles to reclaim memory when any participant fails. If a
9997 /// participant would like to cleanly close a `BufferCollection` without
9998 /// causing buffer collection failure, the participant can send `Release`
9999 /// before closing the `BufferCollection` client end. The `Release` can
10000 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
10001 /// buffer collection won't require constraints from this node in order to
10002 /// allocate. If after `SetConstraints`, the constraints are retained and
10003 /// aggregated, despite the lack of `BufferCollection` connection at the
10004 /// time of constraints aggregation.
10005 ///
10006 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
10007 ///
10008 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
10009 /// end (without `Release` first) will trigger failure of the buffer
10010 /// collection. To close a `BufferCollectionTokenGroup` channel without
10011 /// failing the buffer collection, ensure that AllChildrenPresent() has been
10012 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
10013 /// client end.
10014 ///
10015 /// If `Release` occurs before
10016 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
10017 /// buffer collection will fail (triggered by reception of `Release` without
10018 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
10019 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
10020 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
10021 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
10022 /// close requires `AllChildrenPresent` (if not already sent), then
10023 /// `Release`, then close client end.
10024 ///
10025 /// If `Release` occurs after `AllChildrenPresent`, the children and all
10026 /// their constraints remain intact (just as they would if the
10027 /// `BufferCollectionTokenGroup` channel had remained open), and the client
10028 /// end close doesn't trigger buffer collection failure.
10029 ///
10030 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
10031 ///
10032 /// For brevity, the per-channel-protocol paragraphs above ignore the
10033 /// separate failure domain created by
10034 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
10035 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
10036 /// unexpectedly closes (without `Release` first) and that client end is
10037 /// under a failure domain, instead of failing the whole buffer collection,
10038 /// the failure domain is failed, but the buffer collection itself is
10039 /// isolated from failure of the failure domain. Such failure domains can be
10040 /// nested, in which case only the inner-most failure domain in which the
10041 /// `Node` resides fails.
10042 pub fn r#release(&self) -> Result<(), fidl::Error> {
10043 BufferCollectionTokenGroupProxyInterface::r#release(self)
10044 }
10045
10046 /// Set a name for VMOs in this buffer collection.
10047 ///
10048 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
10049 /// will be truncated to fit. The name of the vmo will be suffixed with the
10050 /// buffer index within the collection (if the suffix fits within
10051 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
10052 /// listed in the inspect data.
10053 ///
10054 /// The name only affects VMOs allocated after the name is set; this call
10055 /// does not rename existing VMOs. If multiple clients set different names
10056 /// then the larger priority value will win. Setting a new name with the
10057 /// same priority as a prior name doesn't change the name.
10058 ///
10059 /// All table fields are currently required.
10060 ///
10061 /// + request `priority` The name is only set if this is the first `SetName`
10062 /// or if `priority` is greater than any previous `priority` value in
10063 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
10064 /// + request `name` The name for VMOs created under this buffer collection.
10065 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10066 BufferCollectionTokenGroupProxyInterface::r#set_name(self, payload)
10067 }
10068
10069 /// Set information about the current client that can be used by sysmem to
10070 /// help diagnose leaking memory and allocation stalls waiting for a
10071 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
10072 ///
10073 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
10075 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
10076 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
10077 ///
10078 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
10079 /// `Allocator` is the most efficient way to ensure that all
10080 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
10081 /// set, and is also more efficient than separately sending the same debug
10082 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
10083 /// created [`fuchsia.sysmem2/Node`].
10084 ///
10085 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
10086 /// indicate which client is closing their channel first, leading to subtree
10087 /// failure (which can be normal if the purpose of the subtree is over, but
10088 /// if happening earlier than expected, the client-channel-specific name can
10089 /// help diagnose where the failure is first coming from, from sysmem's
10090 /// point of view).
10091 ///
10092 /// All table fields are currently required.
10093 ///
10094 /// + request `name` This can be an arbitrary string, but the current
10095 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
10096 /// + request `id` This can be an arbitrary id, but the current process ID
10097 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
10098 pub fn r#set_debug_client_info(
10099 &self,
10100 mut payload: &NodeSetDebugClientInfoRequest,
10101 ) -> Result<(), fidl::Error> {
10102 BufferCollectionTokenGroupProxyInterface::r#set_debug_client_info(self, payload)
10103 }
10104
10105 /// Sysmem logs a warning if sysmem hasn't seen
10106 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
10107 /// within 5 seconds after creation of a new collection.
10108 ///
10109 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
10111 /// take effect.
10112 ///
10113 /// In most cases the default works well.
10114 ///
10115 /// All table fields are currently required.
10116 ///
10117 /// + request `deadline` The time at which sysmem will start trying to log
10118 /// the warning, unless all constraints are with sysmem by then.
10119 pub fn r#set_debug_timeout_log_deadline(
10120 &self,
10121 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10122 ) -> Result<(), fidl::Error> {
10123 BufferCollectionTokenGroupProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
10124 }
10125
10126 /// This enables verbose logging for the buffer collection.
10127 ///
10128 /// Verbose logging includes constraints set via
10129 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
10130 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
10131 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
10132 /// the tree of `Node`(s).
10133 ///
10134 /// Normally sysmem prints only a single line complaint when aggregation
10135 /// fails, with just the specific detailed reason that aggregation failed,
10136 /// with little surrounding context. While this is often enough to diagnose
10137 /// a problem if only a small change was made and everything was working
10138 /// before the small change, it's often not particularly helpful for getting
10139 /// a new buffer collection to work for the first time. Especially with
10140 /// more complex trees of nodes, involving things like
10141 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
10142 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
10143 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
10144 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
10145 /// looks like and why it's failing a logical allocation, or why a tree or
10146 /// subtree is failing sooner than expected.
10147 ///
10148 /// The intent of the extra logging is to be acceptable from a performance
10149 /// point of view, under the assumption that verbose logging is only enabled
10150 /// on a low number of buffer collections. If we're not tracking down a bug,
10151 /// we shouldn't send this message.
10152 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10153 BufferCollectionTokenGroupProxyInterface::r#set_verbose_logging(self)
10154 }
10155
10156 /// This gets a handle that can be used as a parameter to
10157 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
10158 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
10159 /// client obtained this handle from this `Node`.
10160 ///
10161 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
10162 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
10163 /// despite the two calls typically being on different channels.
10164 ///
10165 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
10166 ///
10167 /// All table fields are currently required.
10168 ///
10169 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
10170 /// different `Node` channel, to prove that the client obtained the handle
10171 /// from this `Node`.
10172 pub fn r#get_node_ref(
10173 &self,
10174 ) -> fidl::client::QueryResponseFut<
10175 NodeGetNodeRefResponse,
10176 fidl::encoding::DefaultFuchsiaResourceDialect,
10177 > {
10178 BufferCollectionTokenGroupProxyInterface::r#get_node_ref(self)
10179 }
10180
10181 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
10182 /// rooted at a different child token of a common parent
10183 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
10184 /// passed-in `node_ref`.
10185 ///
10186 /// This call is for assisting with admission control de-duplication, and
10187 /// with debugging.
10188 ///
10189 /// The `node_ref` must be obtained using
10190 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
10191 ///
10192 /// The `node_ref` can be a duplicated handle; it's not necessary to call
10193 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
10194 ///
10195 /// If a calling token may not actually be a valid token at all due to a
10196 /// potentially hostile/untrusted provider of the token, call
10197 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
10198 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
10199 /// never responds due to a calling token not being a real token (not really
10200 /// talking to sysmem). Another option is to call
10201 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
10202 /// which also validates the token along with converting it to a
10203 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
10204 ///
10205 /// All table fields are currently required.
10206 ///
10207 /// - response `is_alternate`
10208 /// - true: The first parent node in common between the calling node and
10209 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
10210 /// that the calling `Node` and the `node_ref` `Node` will not have both
10211 /// their constraints apply - rather sysmem will choose one or the other
10212 /// of the constraints - never both. This is because only one child of
10213 /// a `BufferCollectionTokenGroup` is selected during logical
10214 /// allocation, with only that one child's subtree contributing to
10215 /// constraints aggregation.
10216 /// - false: The first parent node in common between the calling `Node`
10217 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
10218 /// Currently, this means the first parent node in common is a
10219 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
10220 /// `Release`ed). This means that the calling `Node` and the `node_ref`
10221 /// `Node` may have both their constraints apply during constraints
10222 /// aggregation of the logical allocation, if both `Node`(s) are
10223 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
10224 /// this case, there is no `BufferCollectionTokenGroup` that will
10225 /// directly prevent the two `Node`(s) from both being selected and
10226 /// their constraints both aggregated, but even when false, one or both
10227 /// `Node`(s) may still be eliminated from consideration if one or both
10228 /// `Node`(s) has a direct or indirect parent
10229 /// `BufferCollectionTokenGroup` which selects a child subtree other
10230 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
10231 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
10232 /// associated with the same buffer collection as the calling `Node`.
10233 /// Another reason for this error is if the `node_ref` is an
10234 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
10235 /// a real `node_ref` obtained from `GetNodeRef`.
10236 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
10238 /// the needed rights expected on a real `node_ref`.
10239 /// * No other failing status codes are returned by this call. However,
10240 /// sysmem may add additional codes in future, so the client should have
10241 /// sensible default handling for any failing status code.
10242 pub fn r#is_alternate_for(
10243 &self,
10244 mut payload: NodeIsAlternateForRequest,
10245 ) -> fidl::client::QueryResponseFut<
10246 NodeIsAlternateForResult,
10247 fidl::encoding::DefaultFuchsiaResourceDialect,
10248 > {
10249 BufferCollectionTokenGroupProxyInterface::r#is_alternate_for(self, payload)
10250 }
10251
10252 /// Get the buffer collection ID. This ID is also available from
10253 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
10254 /// within the collection).
10255 ///
10256 /// This call is mainly useful in situations where we can't convey a
10257 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
10258 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
10259 /// handle, which can be joined back up with a `BufferCollection` client end
10260 /// that was created via a different path. Prefer to convey a
10261 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
10262 ///
10263 /// Trusting a `buffer_collection_id` value from a source other than sysmem
10264 /// is analogous to trusting a koid value from a source other than zircon.
10265 /// Both should be avoided unless really necessary, and both require
10266 /// caution. In some situations it may be reasonable to refer to a
10267 /// pre-established `BufferCollection` by `buffer_collection_id` via a
10268 /// protocol for efficiency reasons, but an incoming value purporting to be
10269 /// a `buffer_collection_id` is not sufficient alone to justify granting the
10270 /// sender of the `buffer_collection_id` any capability. The sender must
10271 /// first prove to a receiver that the sender has/had a VMO or has/had a
10272 /// `BufferCollectionToken` to the same collection by sending a handle that
10273 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
10274 /// `buffer_collection_id` value. The receiver should take care to avoid
10275 /// assuming that a sender had a `BufferCollectionToken` in cases where the
10276 /// sender has only proven that the sender had a VMO.
10277 ///
10278 /// - response `buffer_collection_id` This ID is unique per buffer
10279 /// collection per boot. Each buffer is uniquely identified by the
10280 /// `buffer_collection_id` and `buffer_index` together.
10281 pub fn r#get_buffer_collection_id(
10282 &self,
10283 ) -> fidl::client::QueryResponseFut<
10284 NodeGetBufferCollectionIdResponse,
10285 fidl::encoding::DefaultFuchsiaResourceDialect,
10286 > {
10287 BufferCollectionTokenGroupProxyInterface::r#get_buffer_collection_id(self)
10288 }
10289
10290 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
10291 /// created after this message to weak, which means that a client's `Node`
10292 /// client end (or a child created after this message) is not alone
10293 /// sufficient to keep allocated VMOs alive.
10294 ///
10295 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
10296 /// `close_weak_asap`.
10297 ///
10298 /// This message is only permitted before the `Node` becomes ready for
10299 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
10300 /// * `BufferCollectionToken`: any time
10301 /// * `BufferCollection`: before `SetConstraints`
10302 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
10303 ///
10304 /// Currently, no conversion from strong `Node` to weak `Node` after ready
10305 /// for allocation is provided, but a client can simulate that by creating
10306 /// an additional `Node` before allocation and setting that additional
10307 /// `Node` to weak, and then potentially at some point later sending
10308 /// `Release` and closing the client end of the client's strong `Node`, but
10309 /// keeping the client's weak `Node`.
10310 ///
10311 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
10312 /// collection failure (all `Node` client end(s) will see
10313 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
10314 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
10315 /// this situation until all `Node`(s) are ready for allocation. For initial
10316 /// allocation to succeed, at least one strong `Node` is required to exist
10317 /// at allocation time, but after that client receives VMO handles, that
10318 /// client can `BufferCollection.Release` and close the client end without
10319 /// causing this type of failure.
10320 ///
10321 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
10322 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
10323 /// separately as appropriate.
10324 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
10325 BufferCollectionTokenGroupProxyInterface::r#set_weak(self)
10326 }
10327
10328 /// This indicates to sysmem that the client is prepared to pay attention to
10329 /// `close_weak_asap`.
10330 ///
10331 /// If sent, this message must be before
10332 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
10333 ///
10334 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
10335 /// send this message before `WaitForAllBuffersAllocated`, or a parent
10336 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
10337 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
10338 /// trigger buffer collection failure.
10339 ///
10340 /// This message is necessary because weak sysmem VMOs have not always been
10341 /// a thing, so older clients are not aware of the need to pay attention to
10342 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
10343 /// sysmem weak VMO handles asap. By having this message and requiring
10344 /// participants to indicate their acceptance of this aspect of the overall
10345 /// protocol, we avoid situations where an older client is delivered a weak
10346 /// VMO without any way for sysmem to get that VMO to close quickly later
10347 /// (and on a per-buffer basis).
10348 ///
10349 /// A participant that doesn't handle `close_weak_asap` and also doesn't
10350 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
10351 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
10352 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
10353 /// same participant has a child/delegate which does retrieve VMOs, that
10354 /// child/delegate will need to send `SetWeakOk` before
10355 /// `WaitForAllBuffersAllocated`.
10356 ///
10357 /// + request `for_child_nodes_also` If present and true, this means direct
10358 /// child nodes of this node created after this message plus all
10359 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
10360 /// those nodes. Any child node of this node that was created before this
10361 /// message is not included. This setting is "sticky" in the sense that a
10362 /// subsequent `SetWeakOk` without this bool set to true does not reset
10363 /// the server-side bool. If this creates a problem for a participant, a
10364 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
10365 /// tokens instead, as appropriate. A participant should only set
10366 /// `for_child_nodes_also` true if the participant can really promise to
10367 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
10368 /// weak VMO handles held by participants holding the corresponding child
10369 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
10370 /// which are using sysmem(1) can be weak, despite the clients of those
10371 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
10372 /// direct way to find out about `close_weak_asap`. This only applies to
10373 /// descendents of this `Node` which are using sysmem(1), not to this
10374 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
10375 /// token, which will fail allocation unless an ancestor of this `Node`
10376 /// specified `for_child_nodes_also` true.
10377 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10378 BufferCollectionTokenGroupProxyInterface::r#set_weak_ok(self, payload)
10379 }
10380
10381 /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
10383 /// reservation by a different `Node` via
10384 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
10385 ///
10386 /// The `Node` buffer counts may not be released until the entire tree of
10387 /// `Node`(s) is closed or failed, because
10388 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
10389 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
10390 /// `Node` buffer counts remain reserved until the orphaned node is later
10391 /// cleaned up.
10392 ///
10393 /// If the `Node` exceeds a fairly large number of attached eventpair server
10394 /// ends, a log message will indicate this and the `Node` (and the
10395 /// appropriate) sub-tree will fail.
10396 ///
10397 /// The `server_end` will remain open when
10398 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
10399 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
10400 /// [`fuchsia.sysmem2/BufferCollection`].
10401 ///
10402 /// This message can also be used with a
10403 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
10404 pub fn r#attach_node_tracking(
10405 &self,
10406 mut payload: NodeAttachNodeTrackingRequest,
10407 ) -> Result<(), fidl::Error> {
10408 BufferCollectionTokenGroupProxyInterface::r#attach_node_tracking(self, payload)
10409 }
10410
10411 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
10412 /// (including its children) will be selected during allocation (or logical
10413 /// allocation).
10414 ///
10415 /// Before passing the client end of this token to
10416 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
10417 /// [`fuchsia.sysmem2/Node.Sync`] after
10418 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
10419 /// Or the client can use
10420 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
10421 /// essentially includes the `Sync`.
10422 ///
10423 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10424 /// fail the group's subtree and close the connection.
10425 ///
10426 /// After all children have been created, send AllChildrenPresent.
10427 ///
10428 /// + request `token_request` The server end of the new token channel.
10429 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
10430 /// token allows the holder to get the same rights to buffers as the
10431 /// parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
10434 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
10435 /// causes subtree failure.
10436 pub fn r#create_child(
10437 &self,
10438 mut payload: BufferCollectionTokenGroupCreateChildRequest,
10439 ) -> Result<(), fidl::Error> {
10440 BufferCollectionTokenGroupProxyInterface::r#create_child(self, payload)
10441 }
10442
10443 /// Create 1 or more child tokens at once, synchronously. In contrast to
10444 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
10445 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
10446 /// of a returned token to
10447 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
10448 ///
10449 /// The lower-index child tokens are higher priority (attempted sooner) than
10450 /// higher-index child tokens.
10451 ///
10452 /// As per all child tokens, successful aggregation will choose exactly one
10453 /// child among all created children (across all children created across
10454 /// potentially multiple calls to
10455 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
10456 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
10457 ///
10458 /// The maximum permissible total number of children per group, and total
10459 /// number of nodes in an overall tree (from the root) are capped to limits
10460 /// which are not configurable via these protocols.
10461 ///
10462 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
10463 /// this will fail the group's subtree and close the connection.
10464 ///
10465 /// After all children have been created, send AllChildrenPresent.
10466 ///
10467 /// + request `rights_attentuation_masks` The size of the
10468 /// `rights_attentuation_masks` determines the number of created child
10469 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
10470 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
10471 /// other value, each 0 bit in the mask attenuates that right.
10472 /// - response `tokens` The created child tokens.
10473 pub fn r#create_children_sync(
10474 &self,
10475 mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
10476 ) -> fidl::client::QueryResponseFut<
10477 BufferCollectionTokenGroupCreateChildrenSyncResponse,
10478 fidl::encoding::DefaultFuchsiaResourceDialect,
10479 > {
10480 BufferCollectionTokenGroupProxyInterface::r#create_children_sync(self, payload)
10481 }
10482
10483 /// Indicate that no more children will be created.
10484 ///
10485 /// After creating all children, the client should send
10486 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
10487 /// inform sysmem that no more children will be created, so that sysmem can
10488 /// know when it's ok to start aggregating constraints.
10489 ///
10490 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10491 /// fail the group's subtree and close the connection.
10492 ///
10493 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
10494 /// after `AllChildrenPresent`, else failure of the group's subtree will be
10495 /// triggered. This is intentionally not analogous to how `Release` without
10496 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
10497 /// subtree failure.
10498 pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10499 BufferCollectionTokenGroupProxyInterface::r#all_children_present(self)
10500 }
10501}
10502
10503impl BufferCollectionTokenGroupProxyInterface for BufferCollectionTokenGroupProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync`: sends an empty payload and returns a future for the
    // (empty) reply. The ordinal used in the decoder must match the one used
    // for the request below so the transaction can be validated.
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the reply message buffer into the method's response type.
        // Flexible (open) interaction: unknown-interaction framing is handled
        // by `into_result`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
            Ok(_response)
        }
        // Send the request now; decoding happens lazily when the future is
        // polled and the reply arrives.
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10525
10526 fn r#release(&self) -> Result<(), fidl::Error> {
10527 self.client.send::<fidl::encoding::EmptyPayload>(
10528 (),
10529 0x6a5cae7d6d6e04c6,
10530 fidl::encoding::DynamicFlags::FLEXIBLE,
10531 )
10532 }
10533
10534 fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10535 self.client.send::<NodeSetNameRequest>(
10536 payload,
10537 0xb41f1624f48c1e9,
10538 fidl::encoding::DynamicFlags::FLEXIBLE,
10539 )
10540 }
10541
10542 fn r#set_debug_client_info(
10543 &self,
10544 mut payload: &NodeSetDebugClientInfoRequest,
10545 ) -> Result<(), fidl::Error> {
10546 self.client.send::<NodeSetDebugClientInfoRequest>(
10547 payload,
10548 0x5cde8914608d99b1,
10549 fidl::encoding::DynamicFlags::FLEXIBLE,
10550 )
10551 }
10552
10553 fn r#set_debug_timeout_log_deadline(
10554 &self,
10555 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10556 ) -> Result<(), fidl::Error> {
10557 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
10558 payload,
10559 0x716b0af13d5c0806,
10560 fidl::encoding::DynamicFlags::FLEXIBLE,
10561 )
10562 }
10563
10564 fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10565 self.client.send::<fidl::encoding::EmptyPayload>(
10566 (),
10567 0x5209c77415b4dfad,
10568 fidl::encoding::DynamicFlags::FLEXIBLE,
10569 )
10570 }
10571
    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef`: empty request, response carries the node-ref
    // event handle. Decoder and request share ordinal 0x5b3d0e51614df053.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decodes the reply buffer into `NodeGetNodeRefResponse`; flexible
        // (open) interaction framing is unwrapped by `into_result`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
            Ok(_response)
        }
        // Send now; decoding runs when the returned future is polled.
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10595
    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `IsAlternateFor`: payload carries the node_ref handle (taken by
    // value so encoding can move the handle out via `&mut`).
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Decodes the reply into a domain-error result
        // (`FlexibleResultType`: success payload or `Error`).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
            // Identity map retained from the generator's response-conversion
            // template; it is a no-op here.
            Ok(_response.map(|x| x))
        }
        // Send now; decoding runs when the returned future is polled.
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10622
    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId`: empty request; response carries the
    // per-boot-unique collection id. Ordinal 0x77d19a494b78ba8c on both sides.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Decodes the reply buffer into `NodeGetBufferCollectionIdResponse`;
        // flexible (open) interaction framing is unwrapped by `into_result`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        // Send now; decoding runs when the returned future is polled.
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10649
    /// One-way flexible call: sends an empty-payload message with ordinal
    /// 0x22dd3ea514eeffe1; no reply is expected, so errors are send-side only.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
10657
    /// One-way flexible call: encodes `payload` and sends it with ordinal
    /// 0x38a44fc4d7724be9; no reply is expected.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
10665
    /// One-way flexible call: encodes `payload` and sends it with ordinal
    /// 0x3f22f2a293d3cdac; no reply is expected.
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
10676
    /// One-way flexible call: encodes `payload` and sends it with ordinal
    /// 0x41a0075d419f30c5; no reply is expected.
    fn r#create_child(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
            &mut payload,
            0x41a0075d419f30c5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
10687
    type CreateChildrenSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Two-way flexible call (ordinal 0x15dea448c536070a). Note the request is
    /// taken by shared reference, unlike the handle-carrying requests above.
    fn r#create_children_sync(
        &self,
        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut {
        // Decodes the reply as a flexible (non-result) envelope and unwraps it.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x15dea448c536070a,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenGroupCreateChildrenSyncRequest,
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >(
            payload,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
10717
    /// One-way flexible call: sends an empty-payload message with ordinal
    /// 0x5c327e4a23391312; no reply is expected.
    fn r#all_children_present(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5c327e4a23391312,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
10725}
10726
/// Stream of events arriving on a `BufferCollectionTokenGroup` channel.
pub struct BufferCollectionTokenGroupEventStream {
    // Raw receiver of incoming event message buffers; each buffer is decoded
    // into a `BufferCollectionTokenGroupEvent` in `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
10730
// `Unpin` lets callers poll this stream without pinning boilerplate; the
// stream is only accessed via `&mut self` in `poll_next` below.
impl std::marker::Unpin for BufferCollectionTokenGroupEventStream {}
10732
impl futures::stream::FusedStream for BufferCollectionTokenGroupEventStream {
    /// Delegates to the underlying event receiver; once terminated, the
    /// stream will not yield further items.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
10738
10739impl futures::Stream for BufferCollectionTokenGroupEventStream {
10740 type Item = Result<BufferCollectionTokenGroupEvent, fidl::Error>;
10741
10742 fn poll_next(
10743 mut self: std::pin::Pin<&mut Self>,
10744 cx: &mut std::task::Context<'_>,
10745 ) -> std::task::Poll<Option<Self::Item>> {
10746 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
10747 &mut self.event_receiver,
10748 cx
10749 )?) {
10750 Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenGroupEvent::decode(buf))),
10751 None => std::task::Poll::Ready(None),
10752 }
10753 }
10754}
10755
/// Events receivable on a `BufferCollectionTokenGroup` channel. Only the
/// unknown-event variant exists here; `decode` below never produces any
/// named event for this protocol.
#[derive(Debug)]
pub enum BufferCollectionTokenGroupEvent {
    /// A flexible event whose ordinal this binding does not recognize.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
10764
impl BufferCollectionTokenGroupEvent {
    /// Decodes a message buffer as a [`BufferCollectionTokenGroupEvent`].
    ///
    /// Events carry transaction id 0 (checked only in debug builds). Any
    /// message sent with FLEXIBLE dynamic flags decodes as `_UnknownEvent`;
    /// everything else is reported as an `UnknownOrdinal` error.
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Flexible events with unrecognized ordinals are surfaced rather
            // than treated as protocol errors.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionTokenGroupEvent::_UnknownEvent {
                    ordinal: tx_header.ordinal,
                })
            }
            // Strict (non-flexible) unknown ordinals are a protocol violation.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            })
        }
    }
}
10786
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupRequestStream {
    // Shared server state (channel plus shutdown signaling); control handles
    // created from this stream hold clones of the same `Arc`.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the peer closes or shutdown is observed; polling afterwards
    // panics (see `poll_next`).
    is_terminated: bool,
}
10792
// `Unpin` lets servers poll the request stream without pinning boilerplate.
impl std::marker::Unpin for BufferCollectionTokenGroupRequestStream {}
10794
impl futures::stream::FusedStream for BufferCollectionTokenGroupRequestStream {
    /// Reports the cached termination flag set by `poll_next` on peer close
    /// or shutdown; a terminated stream must not be polled again.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
10800
impl fidl::endpoints::RequestStream for BufferCollectionTokenGroupRequestStream {
    type Protocol = BufferCollectionTokenGroupMarker;
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Wraps an async channel in a fresh, non-terminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Returns a control handle sharing this stream's server state.
    fn control_handle(&self) -> Self::ControlHandle {
        BufferCollectionTokenGroupControlHandle { inner: self.inner.clone() }
    }

    /// Decomposes the stream into its shared state and termination flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Rebuilds a stream from parts previously produced by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
10827
impl futures::Stream for BufferCollectionTokenGroupRequestStream {
    type Item = Result<BufferCollectionTokenGroupRequest, fidl::Error>;

    /// Reads one message from the channel, dispatches on its method ordinal,
    /// and yields the decoded request. One-way methods carry a control handle;
    /// two-way methods carry a responder owning the transaction id.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a requested shutdown before attempting another read.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling after termination is a caller bug (FusedStream contract).
        if this.is_terminated {
            panic!("polled BufferCollectionTokenGroupRequestStream after completion");
        }
        // Borrow thread-local byte/handle buffers so decoding doesn't allocate
        // per message.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on method ordinal. Each arm first validates the
                // tx_id against the method's one-way/two-way expectation, then
                // decodes the request payload in place.
                std::task::Poll::Ready(Some(match header.ordinal {
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::Sync {
                            responder: BufferCollectionTokenGroupSyncResponder {
                                // ManuallyDrop: the responder's Drop handles
                                // the not-replied case itself.
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::Release {
                            control_handle,
                        })
                    }
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetName {payload: req,
                            control_handle,
                        })
                    }
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetDebugClientInfo {payload: req,
                            control_handle,
                        })
                    }
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {payload: req,
                            control_handle,
                        })
                    }
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetVerboseLogging {
                            control_handle,
                        })
                    }
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::GetNodeRef {
                            responder: BufferCollectionTokenGroupGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::IsAlternateFor {payload: req,
                            responder: BufferCollectionTokenGroupIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::GetBufferCollectionId {
                            responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetWeak {
                            control_handle,
                        })
                    }
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::SetWeakOk {payload: req,
                            control_handle,
                        })
                    }
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::AttachNodeTracking {payload: req,
                            control_handle,
                        })
                    }
                    0x41a0075d419f30c5 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::CreateChild {payload: req,
                            control_handle,
                        })
                    }
                    0x15dea448c536070a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildrenSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildrenSyncRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::CreateChildrenSync {payload: req,
                            responder: BufferCollectionTokenGroupCreateChildrenSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    0x5c327e4a23391312 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = BufferCollectionTokenGroupControlHandle {
                            inner: this.inner.clone(),
                        };
                        Ok(BufferCollectionTokenGroupRequest::AllChildrenPresent {
                            control_handle,
                        })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface it
                    // to the server as `_UnknownMethod` without replying.
                    _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error so the waiting client unblocks, then report it.
                    _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Strict unknown ordinal: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
11073
11074/// The sysmem implementation is consistent with a logical / conceptual model of
11075/// allocation / logical allocation as follows:
11076///
11077/// As usual, a logical allocation considers either the root and all nodes with
11078/// connectivity to the root that don't transit a [`fuchsia.sysmem2/Node`]
11079/// created with [`fuchsia.sysmem2/BufferCollection.AttachToken`], or a subtree
11080/// rooted at an `AttachToken` `Node` and all `Node`(s) with connectivity to
11081/// that subtree that don't transit another `AttachToken`. This is called the
11082/// logical allocation pruned subtree, or pruned subtree for short.
11083///
11084/// During constraints aggregation, each
11085/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] will select a single child
11086/// `Node` among its direct children. The rest of the children will appear to
11087/// fail the logical allocation, while the selected child may succeed.
11088///
11089/// When more than one `BufferCollectionTokenGroup` exists in the overall
11090/// logical allocation pruned subtree, the relative priority between two groups
11091/// is equivalent to their ordering in a DFS pre-order iteration of the tree,
11092/// with parents higher priority than children, and left children higher
11093/// priority than right children.
11094///
11095/// When a particular child of a group is selected (whether provisionally during
11096/// a constraints aggregation attempt, or as a final selection), the
11097/// non-selection of other children of the group will "hide" any other groups
11098/// under those non-selected children.
11099///
11100/// Within a logical allocation, aggregation is attempted first by provisionally
11101/// selecting child 0 of the highest-priority group, and child 0 of the next
11102/// highest-priority group that isn't hidden by the provisional selections so
11103/// far, etc.
11104///
11105/// If that aggregation attempt fails, aggregation will be attempted with the
11106/// ordinal 0 child of all the same groups except the lowest priority non-hidden
11107/// group which will provisionally select its ordinal 1 child (and then child 2
11108/// and so on). If a new lowest-priority group is un-hidden as provisional
11109/// selections are updated, that newly un-hidden lowest-priority group has all
11110/// its children considered in order, before changing the provisional selection
11111/// in the former lowest-priority group. In terms of result, this is equivalent
11112/// to systematic enumeration of all possible combinations of choices in a
11113/// counting-like order updating the lowest-priority group the most often and
11114/// the highest-priority group the least often. Rather than actually attempting
11115/// aggregation with all the combinations, we can skip over combinations which
11116/// are redundant/equivalent due to hiding without any change to the result.
11117///
11118/// Attempted constraint aggregations of enumerated non-equivalent combinations
11119/// of choices continue in this manner until either (a) all aggregation attempts
11120/// fail in which case the overall logical allocation fails, or (b) until an
11121/// attempted aggregation succeeds, in which case buffer allocation (if needed;
11122/// if this is the pruned subtree rooted at the overall root `Node`) is
11123/// attempted once. If buffer allocation based on the first successful
11124/// constraints aggregation fails, the overall logical allocation fails (there
11125/// is no buffer allocation retry / re-attempt). If buffer allocation succeeds
11126/// (or is not needed due to being a pruned subtree that doesn't include the
11127/// root), the logical allocation succeeds.
11128///
11129/// If this prioritization scheme cannot reasonably work for your usage of
11130/// sysmem, please don't hesitate to contact sysmem folks to discuss potentially
11131/// adding a way to achieve what you need.
11132///
11133/// Please avoid creating a large number of `BufferCollectionTokenGroup`(s) per
11134/// logical allocation, especially with large number of children overall, and
11135/// especially in cases where aggregation may reasonably be expected to often
11136/// fail using ordinal 0 children and possibly with later children as well.
11137/// Sysmem mitigates potentially high time complexity of evaluating too many
11138/// child combinations/selections across too many groups by simply failing
11139/// logical allocation beyond a certain (fairly high, but not huge) max number
11140/// of considered group child combinations/selections. More advanced (and more
11141/// complicated) mitigation is not anticipated to be practically necessary or
11142/// worth the added complexity. Please contact sysmem folks if the max limit is
11143/// getting hit or if you anticipate it getting hit, to discuss potential
11144/// options.
11145///
11146/// Prefer to use multiple [`fuchsia.sysmem2/ImageFormatConstraints`] in a
11147/// single [`fuchsia.sysmem2/BufferCollectionConstraints`] when feasible (when a
11148/// participant just needs to express the ability to work with more than a
11149/// single [`fuchsia.images2/PixelFormat`], with sysmem choosing which
11150/// `PixelFormat` to use among those supported by all participants).
11151///
11152/// Similar to [`fuchsia.sysmem2/BufferCollectionToken`] and
11153/// [`fuchsia.sysmem2/BufferCollection`], closure of the
11154/// `BufferCollectionTokenGroup` channel without sending
11155/// [`fuchsia.sysmem2/Node.Release`] first will cause buffer collection failure
11156/// (or subtree failure if using
11157/// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11158/// [`fuchsia.sysmem2/BufferCollection.AttachToken`] and the
11159/// `BufferCollectionTokenGroup` is part of a subtree under such a node that
11160/// doesn't propagate failure to its parent).
11161///
11162/// Epitaphs are not used in this protocol.
11163#[derive(Debug)]
11164pub enum BufferCollectionTokenGroupRequest {
11165 /// Ensure that previous messages have been received server side. This is
11166 /// particularly useful after previous messages that created new tokens,
11167 /// because a token must be known to the sysmem server before sending the
11168 /// token to another participant.
11169 ///
11170 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
11171 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
11172 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
11173 /// to mitigate the possibility of a hostile/fake
11174 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
11175 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
11177 /// the token as part of exchanging it for a
11178 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
11179 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
11180 /// of stalling.
11181 ///
11182 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
11183 /// and then starting and completing a `Sync`, it's then safe to send the
11184 /// `BufferCollectionToken` client ends to other participants knowing the
11185 /// server will recognize the tokens when they're sent by the other
11186 /// participants to sysmem in a
11187 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
11188 /// efficient way to create tokens while avoiding unnecessary round trips.
11189 ///
11190 /// Other options include waiting for each
11191 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
11192 /// individually (using separate call to `Sync` after each), or calling
11193 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
11194 /// converted to a `BufferCollection` via
11195 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
11196 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
11197 /// the sync step and can create multiple tokens at once.
11198 Sync { responder: BufferCollectionTokenGroupSyncResponder },
11199 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
11200 ///
11201 /// Normally a participant will convert a `BufferCollectionToken` into a
11202 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
11203 /// `Release` via the token (and then close the channel immediately or
11204 /// shortly later in response to server closing the server end), which
11205 /// avoids causing buffer collection failure. Without a prior `Release`,
11206 /// closing the `BufferCollectionToken` client end will cause buffer
11207 /// collection failure.
11208 ///
11209 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
11210 ///
11211 /// By default the server handles unexpected closure of a
11212 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
11213 /// first) by failing the buffer collection. Partly this is to expedite
11214 /// closing VMO handles to reclaim memory when any participant fails. If a
11215 /// participant would like to cleanly close a `BufferCollection` without
11216 /// causing buffer collection failure, the participant can send `Release`
11217 /// before closing the `BufferCollection` client end. The `Release` can
11218 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
11219 /// buffer collection won't require constraints from this node in order to
11220 /// allocate. If after `SetConstraints`, the constraints are retained and
11221 /// aggregated, despite the lack of `BufferCollection` connection at the
11222 /// time of constraints aggregation.
11223 ///
11224 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
11225 ///
11226 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
11227 /// end (without `Release` first) will trigger failure of the buffer
11228 /// collection. To close a `BufferCollectionTokenGroup` channel without
11229 /// failing the buffer collection, ensure that AllChildrenPresent() has been
11230 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
11231 /// client end.
11232 ///
11233 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
11235 /// buffer collection will fail (triggered by reception of `Release` without
11236 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
11237 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
11238 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
11239 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
11240 /// close requires `AllChildrenPresent` (if not already sent), then
11241 /// `Release`, then close client end.
11242 ///
11243 /// If `Release` occurs after `AllChildrenPresent`, the children and all
11244 /// their constraints remain intact (just as they would if the
11245 /// `BufferCollectionTokenGroup` channel had remained open), and the client
11246 /// end close doesn't trigger buffer collection failure.
11247 ///
11248 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
11249 ///
11250 /// For brevity, the per-channel-protocol paragraphs above ignore the
11251 /// separate failure domain created by
11252 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11253 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
11254 /// unexpectedly closes (without `Release` first) and that client end is
11255 /// under a failure domain, instead of failing the whole buffer collection,
11256 /// the failure domain is failed, but the buffer collection itself is
11257 /// isolated from failure of the failure domain. Such failure domains can be
11258 /// nested, in which case only the inner-most failure domain in which the
11259 /// `Node` resides fails.
11260 Release { control_handle: BufferCollectionTokenGroupControlHandle },
11261 /// Set a name for VMOs in this buffer collection.
11262 ///
11263 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
11264 /// will be truncated to fit. The name of the vmo will be suffixed with the
11265 /// buffer index within the collection (if the suffix fits within
11266 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
11267 /// listed in the inspect data.
11268 ///
11269 /// The name only affects VMOs allocated after the name is set; this call
11270 /// does not rename existing VMOs. If multiple clients set different names
11271 /// then the larger priority value will win. Setting a new name with the
11272 /// same priority as a prior name doesn't change the name.
11273 ///
11274 /// All table fields are currently required.
11275 ///
11276 /// + request `priority` The name is only set if this is the first `SetName`
11277 /// or if `priority` is greater than any previous `priority` value in
11278 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
11279 /// + request `name` The name for VMOs created under this buffer collection.
11280 SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenGroupControlHandle },
11281 /// Set information about the current client that can be used by sysmem to
11282 /// help diagnose leaking memory and allocation stalls waiting for a
11283 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
11284 ///
11285 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
11287 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
11288 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
11289 ///
11290 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
11291 /// `Allocator` is the most efficient way to ensure that all
11292 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
11293 /// set, and is also more efficient than separately sending the same debug
11294 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
11295 /// created [`fuchsia.sysmem2/Node`].
11296 ///
11297 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
11298 /// indicate which client is closing their channel first, leading to subtree
11299 /// failure (which can be normal if the purpose of the subtree is over, but
11300 /// if happening earlier than expected, the client-channel-specific name can
11301 /// help diagnose where the failure is first coming from, from sysmem's
11302 /// point of view).
11303 ///
11304 /// All table fields are currently required.
11305 ///
11306 /// + request `name` This can be an arbitrary string, but the current
11307 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
11308 /// + request `id` This can be an arbitrary id, but the current process ID
11309 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
11310 SetDebugClientInfo {
11311 payload: NodeSetDebugClientInfoRequest,
11312 control_handle: BufferCollectionTokenGroupControlHandle,
11313 },
11314 /// Sysmem logs a warning if sysmem hasn't seen
11315 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
11316 /// within 5 seconds after creation of a new collection.
11317 ///
11318 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
11320 /// take effect.
11321 ///
11322 /// In most cases the default works well.
11323 ///
11324 /// All table fields are currently required.
11325 ///
11326 /// + request `deadline` The time at which sysmem will start trying to log
11327 /// the warning, unless all constraints are with sysmem by then.
11328 SetDebugTimeoutLogDeadline {
11329 payload: NodeSetDebugTimeoutLogDeadlineRequest,
11330 control_handle: BufferCollectionTokenGroupControlHandle,
11331 },
11332 /// This enables verbose logging for the buffer collection.
11333 ///
11334 /// Verbose logging includes constraints set via
11335 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
11336 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
11337 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
11338 /// the tree of `Node`(s).
11339 ///
11340 /// Normally sysmem prints only a single line complaint when aggregation
11341 /// fails, with just the specific detailed reason that aggregation failed,
11342 /// with little surrounding context. While this is often enough to diagnose
11343 /// a problem if only a small change was made and everything was working
11344 /// before the small change, it's often not particularly helpful for getting
11345 /// a new buffer collection to work for the first time. Especially with
11346 /// more complex trees of nodes, involving things like
11347 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
11348 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
11349 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
11350 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
11351 /// looks like and why it's failing a logical allocation, or why a tree or
11352 /// subtree is failing sooner than expected.
11353 ///
11354 /// The intent of the extra logging is to be acceptable from a performance
11355 /// point of view, under the assumption that verbose logging is only enabled
11356 /// on a low number of buffer collections. If we're not tracking down a bug,
11357 /// we shouldn't send this message.
11358 SetVerboseLogging { control_handle: BufferCollectionTokenGroupControlHandle },
11359 /// This gets a handle that can be used as a parameter to
11360 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
11361 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
11362 /// client obtained this handle from this `Node`.
11363 ///
11364 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
11365 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
11366 /// despite the two calls typically being on different channels.
11367 ///
11368 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
11369 ///
11370 /// All table fields are currently required.
11371 ///
11372 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
11373 /// different `Node` channel, to prove that the client obtained the handle
11374 /// from this `Node`.
11375 GetNodeRef { responder: BufferCollectionTokenGroupGetNodeRefResponder },
11376 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
11377 /// rooted at a different child token of a common parent
11378 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
11379 /// passed-in `node_ref`.
11380 ///
11381 /// This call is for assisting with admission control de-duplication, and
11382 /// with debugging.
11383 ///
11384 /// The `node_ref` must be obtained using
11385 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
11386 ///
11387 /// The `node_ref` can be a duplicated handle; it's not necessary to call
11388 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
11389 ///
11390 /// If a calling token may not actually be a valid token at all due to a
11391 /// potentially hostile/untrusted provider of the token, call
11392 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
11393 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
11394 /// never responds due to a calling token not being a real token (not really
11395 /// talking to sysmem). Another option is to call
11396 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
11397 /// which also validates the token along with converting it to a
11398 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
11399 ///
11400 /// All table fields are currently required.
11401 ///
11402 /// - response `is_alternate`
11403 /// - true: The first parent node in common between the calling node and
11404 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
11405 /// that the calling `Node` and the `node_ref` `Node` will not have both
11406 /// their constraints apply - rather sysmem will choose one or the other
11407 /// of the constraints - never both. This is because only one child of
11408 /// a `BufferCollectionTokenGroup` is selected during logical
11409 /// allocation, with only that one child's subtree contributing to
11410 /// constraints aggregation.
11411 /// - false: The first parent node in common between the calling `Node`
11412 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
11413 /// Currently, this means the first parent node in common is a
11414 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
11415 /// `Release`ed). This means that the calling `Node` and the `node_ref`
11416 /// `Node` may have both their constraints apply during constraints
11417 /// aggregation of the logical allocation, if both `Node`(s) are
11418 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
11419 /// this case, there is no `BufferCollectionTokenGroup` that will
11420 /// directly prevent the two `Node`(s) from both being selected and
11421 /// their constraints both aggregated, but even when false, one or both
11422 /// `Node`(s) may still be eliminated from consideration if one or both
11423 /// `Node`(s) has a direct or indirect parent
11424 /// `BufferCollectionTokenGroup` which selects a child subtree other
11425 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
11426 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
11427 /// associated with the same buffer collection as the calling `Node`.
11428 /// Another reason for this error is if the `node_ref` is an
11429 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
11430 /// a real `node_ref` obtained from `GetNodeRef`.
11431 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
11433 /// the needed rights expected on a real `node_ref`.
11434 /// * No other failing status codes are returned by this call. However,
11435 /// sysmem may add additional codes in future, so the client should have
11436 /// sensible default handling for any failing status code.
11437 IsAlternateFor {
11438 payload: NodeIsAlternateForRequest,
11439 responder: BufferCollectionTokenGroupIsAlternateForResponder,
11440 },
11441 /// Get the buffer collection ID. This ID is also available from
11442 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
11443 /// within the collection).
11444 ///
11445 /// This call is mainly useful in situations where we can't convey a
11446 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
11447 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
11448 /// handle, which can be joined back up with a `BufferCollection` client end
11449 /// that was created via a different path. Prefer to convey a
11450 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
11451 ///
11452 /// Trusting a `buffer_collection_id` value from a source other than sysmem
11453 /// is analogous to trusting a koid value from a source other than zircon.
11454 /// Both should be avoided unless really necessary, and both require
11455 /// caution. In some situations it may be reasonable to refer to a
11456 /// pre-established `BufferCollection` by `buffer_collection_id` via a
11457 /// protocol for efficiency reasons, but an incoming value purporting to be
11458 /// a `buffer_collection_id` is not sufficient alone to justify granting the
11459 /// sender of the `buffer_collection_id` any capability. The sender must
11460 /// first prove to a receiver that the sender has/had a VMO or has/had a
11461 /// `BufferCollectionToken` to the same collection by sending a handle that
11462 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
11463 /// `buffer_collection_id` value. The receiver should take care to avoid
11464 /// assuming that a sender had a `BufferCollectionToken` in cases where the
11465 /// sender has only proven that the sender had a VMO.
11466 ///
11467 /// - response `buffer_collection_id` This ID is unique per buffer
11468 /// collection per boot. Each buffer is uniquely identified by the
11469 /// `buffer_collection_id` and `buffer_index` together.
11470 GetBufferCollectionId { responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder },
11471 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
11472 /// created after this message to weak, which means that a client's `Node`
11473 /// client end (or a child created after this message) is not alone
11474 /// sufficient to keep allocated VMOs alive.
11475 ///
11476 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
11477 /// `close_weak_asap`.
11478 ///
11479 /// This message is only permitted before the `Node` becomes ready for
11480 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
11481 /// * `BufferCollectionToken`: any time
11482 /// * `BufferCollection`: before `SetConstraints`
11483 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
11484 ///
11485 /// Currently, no conversion from strong `Node` to weak `Node` after ready
11486 /// for allocation is provided, but a client can simulate that by creating
11487 /// an additional `Node` before allocation and setting that additional
11488 /// `Node` to weak, and then potentially at some point later sending
11489 /// `Release` and closing the client end of the client's strong `Node`, but
11490 /// keeping the client's weak `Node`.
11491 ///
11492 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
11493 /// collection failure (all `Node` client end(s) will see
11494 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
11495 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
11496 /// this situation until all `Node`(s) are ready for allocation. For initial
11497 /// allocation to succeed, at least one strong `Node` is required to exist
11498 /// at allocation time, but after that client receives VMO handles, that
11499 /// client can `BufferCollection.Release` and close the client end without
11500 /// causing this type of failure.
11501 ///
11502 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
11503 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
11504 /// separately as appropriate.
11505 SetWeak { control_handle: BufferCollectionTokenGroupControlHandle },
11506 /// This indicates to sysmem that the client is prepared to pay attention to
11507 /// `close_weak_asap`.
11508 ///
11509 /// If sent, this message must be before
11510 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
11511 ///
11512 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
11513 /// send this message before `WaitForAllBuffersAllocated`, or a parent
11514 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
11515 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
11516 /// trigger buffer collection failure.
11517 ///
11518 /// This message is necessary because weak sysmem VMOs have not always been
11519 /// a thing, so older clients are not aware of the need to pay attention to
11520 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
11521 /// sysmem weak VMO handles asap. By having this message and requiring
11522 /// participants to indicate their acceptance of this aspect of the overall
11523 /// protocol, we avoid situations where an older client is delivered a weak
11524 /// VMO without any way for sysmem to get that VMO to close quickly later
11525 /// (and on a per-buffer basis).
11526 ///
11527 /// A participant that doesn't handle `close_weak_asap` and also doesn't
11528 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
11529 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
11530 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
11531 /// same participant has a child/delegate which does retrieve VMOs, that
11532 /// child/delegate will need to send `SetWeakOk` before
11533 /// `WaitForAllBuffersAllocated`.
11534 ///
11535 /// + request `for_child_nodes_also` If present and true, this means direct
11536 /// child nodes of this node created after this message plus all
11537 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
11538 /// those nodes. Any child node of this node that was created before this
11539 /// message is not included. This setting is "sticky" in the sense that a
11540 /// subsequent `SetWeakOk` without this bool set to true does not reset
11541 /// the server-side bool. If this creates a problem for a participant, a
11542 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
11543 /// tokens instead, as appropriate. A participant should only set
11544 /// `for_child_nodes_also` true if the participant can really promise to
11545 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
11546 /// weak VMO handles held by participants holding the corresponding child
11547 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
11548 /// which are using sysmem(1) can be weak, despite the clients of those
11549 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
11550 /// direct way to find out about `close_weak_asap`. This only applies to
11551 /// descendents of this `Node` which are using sysmem(1), not to this
11552 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
11553 /// token, which will fail allocation unless an ancestor of this `Node`
11554 /// specified `for_child_nodes_also` true.
11555 SetWeakOk {
11556 payload: NodeSetWeakOkRequest,
11557 control_handle: BufferCollectionTokenGroupControlHandle,
11558 },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
11561 /// reservation by a different `Node` via
11562 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
11563 ///
11564 /// The `Node` buffer counts may not be released until the entire tree of
11565 /// `Node`(s) is closed or failed, because
11566 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
11567 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
11568 /// `Node` buffer counts remain reserved until the orphaned node is later
11569 /// cleaned up.
11570 ///
11571 /// If the `Node` exceeds a fairly large number of attached eventpair server
11572 /// ends, a log message will indicate this and the `Node` (and the
11573 /// appropriate) sub-tree will fail.
11574 ///
11575 /// The `server_end` will remain open when
11576 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
11577 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
11578 /// [`fuchsia.sysmem2/BufferCollection`].
11579 ///
11580 /// This message can also be used with a
11581 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
11582 AttachNodeTracking {
11583 payload: NodeAttachNodeTrackingRequest,
11584 control_handle: BufferCollectionTokenGroupControlHandle,
11585 },
11586 /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
11587 /// (including its children) will be selected during allocation (or logical
11588 /// allocation).
11589 ///
11590 /// Before passing the client end of this token to
11591 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
11592 /// [`fuchsia.sysmem2/Node.Sync`] after
11593 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
11594 /// Or the client can use
11595 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
11596 /// essentially includes the `Sync`.
11597 ///
11598 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11599 /// fail the group's subtree and close the connection.
11600 ///
11601 /// After all children have been created, send AllChildrenPresent.
11602 ///
11603 /// + request `token_request` The server end of the new token channel.
11604 /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
11605 /// token allows the holder to get the same rights to buffers as the
11606 /// parent token (of the group) had. When the value isn't
    /// ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    /// bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
11609 /// for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
11610 /// causes subtree failure.
11611 CreateChild {
11612 payload: BufferCollectionTokenGroupCreateChildRequest,
11613 control_handle: BufferCollectionTokenGroupControlHandle,
11614 },
11615 /// Create 1 or more child tokens at once, synchronously. In contrast to
11616 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
11617 /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
11618 /// of a returned token to
11619 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
11620 ///
11621 /// The lower-index child tokens are higher priority (attempted sooner) than
11622 /// higher-index child tokens.
11623 ///
11624 /// As per all child tokens, successful aggregation will choose exactly one
11625 /// child among all created children (across all children created across
11626 /// potentially multiple calls to
11627 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
11628 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
11629 ///
11630 /// The maximum permissible total number of children per group, and total
11631 /// number of nodes in an overall tree (from the root) are capped to limits
11632 /// which are not configurable via these protocols.
11633 ///
11634 /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
11635 /// this will fail the group's subtree and close the connection.
11636 ///
11637 /// After all children have been created, send AllChildrenPresent.
11638 ///
    /// + request `rights_attenuation_masks` The size of the
    ///   `rights_attenuation_masks` determines the number of created child
11641 /// tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
11642 /// The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
11643 /// other value, each 0 bit in the mask attenuates that right.
11644 /// - response `tokens` The created child tokens.
11645 CreateChildrenSync {
11646 payload: BufferCollectionTokenGroupCreateChildrenSyncRequest,
11647 responder: BufferCollectionTokenGroupCreateChildrenSyncResponder,
11648 },
11649 /// Indicate that no more children will be created.
11650 ///
11651 /// After creating all children, the client should send
11652 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
11653 /// inform sysmem that no more children will be created, so that sysmem can
11654 /// know when it's ok to start aggregating constraints.
11655 ///
11656 /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11657 /// fail the group's subtree and close the connection.
11658 ///
11659 /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
11660 /// after `AllChildrenPresent`, else failure of the group's subtree will be
11661 /// triggered. This is intentionally not analogous to how `Release` without
11662 /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
11663 /// subtree failure.
11664 AllChildrenPresent { control_handle: BufferCollectionTokenGroupControlHandle },
11665 /// An interaction was received which does not match any known method.
11666 #[non_exhaustive]
11667 _UnknownMethod {
11668 /// Ordinal of the method that was called.
11669 ordinal: u64,
11670 control_handle: BufferCollectionTokenGroupControlHandle,
11671 method_type: fidl::MethodType,
11672 },
11673}
11674
11675impl BufferCollectionTokenGroupRequest {
11676 #[allow(irrefutable_let_patterns)]
11677 pub fn into_sync(self) -> Option<(BufferCollectionTokenGroupSyncResponder)> {
11678 if let BufferCollectionTokenGroupRequest::Sync { responder } = self {
11679 Some((responder))
11680 } else {
11681 None
11682 }
11683 }
11684
11685 #[allow(irrefutable_let_patterns)]
11686 pub fn into_release(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11687 if let BufferCollectionTokenGroupRequest::Release { control_handle } = self {
11688 Some((control_handle))
11689 } else {
11690 None
11691 }
11692 }
11693
11694 #[allow(irrefutable_let_patterns)]
11695 pub fn into_set_name(
11696 self,
11697 ) -> Option<(NodeSetNameRequest, BufferCollectionTokenGroupControlHandle)> {
11698 if let BufferCollectionTokenGroupRequest::SetName { payload, control_handle } = self {
11699 Some((payload, control_handle))
11700 } else {
11701 None
11702 }
11703 }
11704
11705 #[allow(irrefutable_let_patterns)]
11706 pub fn into_set_debug_client_info(
11707 self,
11708 ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenGroupControlHandle)> {
11709 if let BufferCollectionTokenGroupRequest::SetDebugClientInfo { payload, control_handle } =
11710 self
11711 {
11712 Some((payload, control_handle))
11713 } else {
11714 None
11715 }
11716 }
11717
11718 #[allow(irrefutable_let_patterns)]
11719 pub fn into_set_debug_timeout_log_deadline(
11720 self,
11721 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenGroupControlHandle)>
11722 {
11723 if let BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {
11724 payload,
11725 control_handle,
11726 } = self
11727 {
11728 Some((payload, control_handle))
11729 } else {
11730 None
11731 }
11732 }
11733
11734 #[allow(irrefutable_let_patterns)]
11735 pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11736 if let BufferCollectionTokenGroupRequest::SetVerboseLogging { control_handle } = self {
11737 Some((control_handle))
11738 } else {
11739 None
11740 }
11741 }
11742
11743 #[allow(irrefutable_let_patterns)]
11744 pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGroupGetNodeRefResponder)> {
11745 if let BufferCollectionTokenGroupRequest::GetNodeRef { responder } = self {
11746 Some((responder))
11747 } else {
11748 None
11749 }
11750 }
11751
11752 #[allow(irrefutable_let_patterns)]
11753 pub fn into_is_alternate_for(
11754 self,
11755 ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenGroupIsAlternateForResponder)>
11756 {
11757 if let BufferCollectionTokenGroupRequest::IsAlternateFor { payload, responder } = self {
11758 Some((payload, responder))
11759 } else {
11760 None
11761 }
11762 }
11763
11764 #[allow(irrefutable_let_patterns)]
11765 pub fn into_get_buffer_collection_id(
11766 self,
11767 ) -> Option<(BufferCollectionTokenGroupGetBufferCollectionIdResponder)> {
11768 if let BufferCollectionTokenGroupRequest::GetBufferCollectionId { responder } = self {
11769 Some((responder))
11770 } else {
11771 None
11772 }
11773 }
11774
11775 #[allow(irrefutable_let_patterns)]
11776 pub fn into_set_weak(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11777 if let BufferCollectionTokenGroupRequest::SetWeak { control_handle } = self {
11778 Some((control_handle))
11779 } else {
11780 None
11781 }
11782 }
11783
11784 #[allow(irrefutable_let_patterns)]
11785 pub fn into_set_weak_ok(
11786 self,
11787 ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenGroupControlHandle)> {
11788 if let BufferCollectionTokenGroupRequest::SetWeakOk { payload, control_handle } = self {
11789 Some((payload, control_handle))
11790 } else {
11791 None
11792 }
11793 }
11794
11795 #[allow(irrefutable_let_patterns)]
11796 pub fn into_attach_node_tracking(
11797 self,
11798 ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenGroupControlHandle)> {
11799 if let BufferCollectionTokenGroupRequest::AttachNodeTracking { payload, control_handle } =
11800 self
11801 {
11802 Some((payload, control_handle))
11803 } else {
11804 None
11805 }
11806 }
11807
11808 #[allow(irrefutable_let_patterns)]
11809 pub fn into_create_child(
11810 self,
11811 ) -> Option<(
11812 BufferCollectionTokenGroupCreateChildRequest,
11813 BufferCollectionTokenGroupControlHandle,
11814 )> {
11815 if let BufferCollectionTokenGroupRequest::CreateChild { payload, control_handle } = self {
11816 Some((payload, control_handle))
11817 } else {
11818 None
11819 }
11820 }
11821
11822 #[allow(irrefutable_let_patterns)]
11823 pub fn into_create_children_sync(
11824 self,
11825 ) -> Option<(
11826 BufferCollectionTokenGroupCreateChildrenSyncRequest,
11827 BufferCollectionTokenGroupCreateChildrenSyncResponder,
11828 )> {
11829 if let BufferCollectionTokenGroupRequest::CreateChildrenSync { payload, responder } = self {
11830 Some((payload, responder))
11831 } else {
11832 None
11833 }
11834 }
11835
11836 #[allow(irrefutable_let_patterns)]
11837 pub fn into_all_children_present(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11838 if let BufferCollectionTokenGroupRequest::AllChildrenPresent { control_handle } = self {
11839 Some((control_handle))
11840 } else {
11841 None
11842 }
11843 }
11844
11845 /// Name of the method defined in FIDL
11846 pub fn method_name(&self) -> &'static str {
11847 match *self {
11848 BufferCollectionTokenGroupRequest::Sync { .. } => "sync",
11849 BufferCollectionTokenGroupRequest::Release { .. } => "release",
11850 BufferCollectionTokenGroupRequest::SetName { .. } => "set_name",
11851 BufferCollectionTokenGroupRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
11852 BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline { .. } => {
11853 "set_debug_timeout_log_deadline"
11854 }
11855 BufferCollectionTokenGroupRequest::SetVerboseLogging { .. } => "set_verbose_logging",
11856 BufferCollectionTokenGroupRequest::GetNodeRef { .. } => "get_node_ref",
11857 BufferCollectionTokenGroupRequest::IsAlternateFor { .. } => "is_alternate_for",
11858 BufferCollectionTokenGroupRequest::GetBufferCollectionId { .. } => {
11859 "get_buffer_collection_id"
11860 }
11861 BufferCollectionTokenGroupRequest::SetWeak { .. } => "set_weak",
11862 BufferCollectionTokenGroupRequest::SetWeakOk { .. } => "set_weak_ok",
11863 BufferCollectionTokenGroupRequest::AttachNodeTracking { .. } => "attach_node_tracking",
11864 BufferCollectionTokenGroupRequest::CreateChild { .. } => "create_child",
11865 BufferCollectionTokenGroupRequest::CreateChildrenSync { .. } => "create_children_sync",
11866 BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => "all_children_present",
11867 BufferCollectionTokenGroupRequest::_UnknownMethod {
11868 method_type: fidl::MethodType::OneWay,
11869 ..
11870 } => "unknown one-way method",
11871 BufferCollectionTokenGroupRequest::_UnknownMethod {
11872 method_type: fidl::MethodType::TwoWay,
11873 ..
11874 } => "unknown two-way method",
11875 }
11876 }
11877}
11878
/// Cloneable server-side handle for a `BufferCollectionTokenGroup` connection;
/// used to shut the channel down (optionally with an epitaph) or signal its
/// peer — see the `fidl::endpoints::ControlHandle` impl below.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupControlHandle {
    // Shared serving state; `Arc` lets responders and the request stream hold
    // the same underlying connection.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
11883
impl fidl::endpoints::ControlHandle for BufferCollectionTokenGroupControlHandle {
    // Every operation delegates to the shared `ServeInner` (or its underlying
    // channel); this impl adds no state of its own.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Peer signaling requires a real zircon channel, so it is only compiled on
    // Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
11909
11910impl BufferCollectionTokenGroupControlHandle {}
11911
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupSyncResponder {
    // Wrapped in `ManuallyDrop` so the handle is dropped exactly once, from
    // either `Drop::drop` or `drop_without_shutdown` (see the impls below).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the originating `Sync` request; echoed in the reply.
    tx_id: u32,
}
11918
/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: the `ManuallyDrop` is dropped exactly once here, and `self`
        // is never accessed again after `drop` returns.
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11929
impl fidl::endpoints::Responder for BufferCollectionTokenGroupSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop behavior.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
11944
11945impl BufferCollectionTokenGroupSyncResponder {
11946 /// Sends a response to the FIDL transaction.
11947 ///
11948 /// Sets the channel to shutdown if an error occurs.
11949 pub fn send(self) -> Result<(), fidl::Error> {
11950 let _result = self.send_raw();
11951 if _result.is_err() {
11952 self.control_handle.shutdown();
11953 }
11954 self.drop_without_shutdown();
11955 _result
11956 }
11957
11958 /// Similar to "send" but does not shutdown the channel if an error occurs.
11959 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
11960 let _result = self.send_raw();
11961 self.drop_without_shutdown();
11962 _result
11963 }
11964
11965 fn send_raw(&self) -> Result<(), fidl::Error> {
11966 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
11967 fidl::encoding::Flexible::new(()),
11968 self.tx_id,
11969 0x11ac2555cf575b54,
11970 fidl::encoding::DynamicFlags::FLEXIBLE,
11971 )
11972 }
11973}
11974
/// Responder for the two-way `GetNodeRef` method. Dropping it without
/// responding shuts down the channel (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetNodeRefResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's shutdown-on-drop logic.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
11981
/// Set the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11992
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop behavior.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12007
12008impl BufferCollectionTokenGroupGetNodeRefResponder {
12009 /// Sends a response to the FIDL transaction.
12010 ///
12011 /// Sets the channel to shutdown if an error occurs.
12012 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12013 let _result = self.send_raw(payload);
12014 if _result.is_err() {
12015 self.control_handle.shutdown();
12016 }
12017 self.drop_without_shutdown();
12018 _result
12019 }
12020
12021 /// Similar to "send" but does not shutdown the channel if an error occurs.
12022 pub fn send_no_shutdown_on_err(
12023 self,
12024 mut payload: NodeGetNodeRefResponse,
12025 ) -> Result<(), fidl::Error> {
12026 let _result = self.send_raw(payload);
12027 self.drop_without_shutdown();
12028 _result
12029 }
12030
12031 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
12032 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
12033 fidl::encoding::Flexible::new(&mut payload),
12034 self.tx_id,
12035 0x5b3d0e51614df053,
12036 fidl::encoding::DynamicFlags::FLEXIBLE,
12037 )
12038 }
12039}
12040
/// Responder for the two-way `IsAlternateFor` method. Dropping it without
/// responding shuts down the channel (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupIsAlternateForResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's shutdown-on-drop logic.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
12047
/// Set the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12058
impl fidl::endpoints::Responder for BufferCollectionTokenGroupIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop behavior.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12073
12074impl BufferCollectionTokenGroupIsAlternateForResponder {
12075 /// Sends a response to the FIDL transaction.
12076 ///
12077 /// Sets the channel to shutdown if an error occurs.
12078 pub fn send(
12079 self,
12080 mut result: Result<&NodeIsAlternateForResponse, Error>,
12081 ) -> Result<(), fidl::Error> {
12082 let _result = self.send_raw(result);
12083 if _result.is_err() {
12084 self.control_handle.shutdown();
12085 }
12086 self.drop_without_shutdown();
12087 _result
12088 }
12089
12090 /// Similar to "send" but does not shutdown the channel if an error occurs.
12091 pub fn send_no_shutdown_on_err(
12092 self,
12093 mut result: Result<&NodeIsAlternateForResponse, Error>,
12094 ) -> Result<(), fidl::Error> {
12095 let _result = self.send_raw(result);
12096 self.drop_without_shutdown();
12097 _result
12098 }
12099
12100 fn send_raw(
12101 &self,
12102 mut result: Result<&NodeIsAlternateForResponse, Error>,
12103 ) -> Result<(), fidl::Error> {
12104 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
12105 NodeIsAlternateForResponse,
12106 Error,
12107 >>(
12108 fidl::encoding::FlexibleResult::new(result),
12109 self.tx_id,
12110 0x3a58e00157e0825,
12111 fidl::encoding::DynamicFlags::FLEXIBLE,
12112 )
12113 }
12114}
12115
/// Responder for the two-way `GetBufferCollectionId` method. Dropping it
/// without responding shuts down the channel (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's shutdown-on-drop logic.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
12122
/// Set the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12133
impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop behavior.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12148
12149impl BufferCollectionTokenGroupGetBufferCollectionIdResponder {
12150 /// Sends a response to the FIDL transaction.
12151 ///
12152 /// Sets the channel to shutdown if an error occurs.
12153 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12154 let _result = self.send_raw(payload);
12155 if _result.is_err() {
12156 self.control_handle.shutdown();
12157 }
12158 self.drop_without_shutdown();
12159 _result
12160 }
12161
12162 /// Similar to "send" but does not shutdown the channel if an error occurs.
12163 pub fn send_no_shutdown_on_err(
12164 self,
12165 mut payload: &NodeGetBufferCollectionIdResponse,
12166 ) -> Result<(), fidl::Error> {
12167 let _result = self.send_raw(payload);
12168 self.drop_without_shutdown();
12169 _result
12170 }
12171
12172 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
12173 self.control_handle
12174 .inner
12175 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
12176 fidl::encoding::Flexible::new(payload),
12177 self.tx_id,
12178 0x77d19a494b78ba8c,
12179 fidl::encoding::DynamicFlags::FLEXIBLE,
12180 )
12181 }
12182}
12183
/// Responder for the two-way `CreateChildrenSync` method. Dropping it without
/// responding shuts down the channel (see the `Drop` impl).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponder {
    // ManuallyDrop so `drop_without_shutdown` can release the handle without
    // running this type's shutdown-on-drop logic.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    tx_id: u32,
}
12190
/// Set the channel to be shut down (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
12201
impl fidl::endpoints::Responder for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    /// Borrows the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop behavior.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
12216
12217impl BufferCollectionTokenGroupCreateChildrenSyncResponder {
12218 /// Sends a response to the FIDL transaction.
12219 ///
12220 /// Sets the channel to shutdown if an error occurs.
12221 pub fn send(
12222 self,
12223 mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12224 ) -> Result<(), fidl::Error> {
12225 let _result = self.send_raw(payload);
12226 if _result.is_err() {
12227 self.control_handle.shutdown();
12228 }
12229 self.drop_without_shutdown();
12230 _result
12231 }
12232
12233 /// Similar to "send" but does not shutdown the channel if an error occurs.
12234 pub fn send_no_shutdown_on_err(
12235 self,
12236 mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12237 ) -> Result<(), fidl::Error> {
12238 let _result = self.send_raw(payload);
12239 self.drop_without_shutdown();
12240 _result
12241 }
12242
12243 fn send_raw(
12244 &self,
12245 mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
12246 ) -> Result<(), fidl::Error> {
12247 self.control_handle.inner.send::<fidl::encoding::FlexibleType<
12248 BufferCollectionTokenGroupCreateChildrenSyncResponse,
12249 >>(
12250 fidl::encoding::Flexible::new(&mut payload),
12251 self.tx_id,
12252 0x15dea448c536070a,
12253 fidl::encoding::DynamicFlags::FLEXIBLE,
12254 )
12255 }
12256}
12257
/// Zero-sized marker type identifying the `fuchsia.sysmem2/Node` protocol;
/// ties together its proxy, stream, and synchronous-proxy types below.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct NodeMarker;
12260
impl fidl::endpoints::ProtocolMarker for NodeMarker {
    type Proxy = NodeProxy;
    type RequestStream = NodeRequestStream;
    // Synchronous proxies require a real Zircon channel, hence Fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = NodeSynchronousProxy;

    // The protocol is not discoverable, so there is no service name to report.
    const DEBUG_NAME: &'static str = "(anonymous) Node";
}
12269pub type NodeIsAlternateForResult = Result<NodeIsAlternateForResponse, Error>;
12270
/// Client-side interface for the `fuchsia.sysmem2/Node` protocol.
///
/// One `r#…` method per protocol method (raw identifiers guard against Rust
/// keyword collisions). Two-way methods return an associated future type;
/// one-way methods return `Result<(), fidl::Error>` immediately.
pub trait NodeProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client for the `Node` protocol; Fuchsia-only
/// because it requires a real Zircon channel.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct NodeSynchronousProxy {
    client: fidl::client::sync::Client,
}
12309
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for NodeSynchronousProxy {
    type Proxy = NodeProxy;
    type Protocol = NodeMarker;

    /// Wraps a raw channel in a synchronous proxy (delegates to `Self::new`).
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Recovers the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
12327
12328#[cfg(target_os = "fuchsia")]
12329impl NodeSynchronousProxy {
12330 pub fn new(channel: fidl::Channel) -> Self {
12331 let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12332 Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
12333 }
12334
12335 pub fn into_channel(self) -> fidl::Channel {
12336 self.client.into_channel()
12337 }
12338
12339 /// Waits until an event arrives and returns it. It is safe for other
12340 /// threads to make concurrent requests while waiting for an event.
12341 pub fn wait_for_event(&self, deadline: zx::MonotonicInstant) -> Result<NodeEvent, fidl::Error> {
12342 NodeEvent::decode(self.client.wait_for_event(deadline)?)
12343 }
12344
12345 /// Ensure that previous messages have been received server side. This is
12346 /// particularly useful after previous messages that created new tokens,
12347 /// because a token must be known to the sysmem server before sending the
12348 /// token to another participant.
12349 ///
12350 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12351 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12352 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12353 /// to mitigate the possibility of a hostile/fake
12354 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12355 /// Another way is to pass the token to
12356 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12357 /// the token as part of exchanging it for a
12358 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12359 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12360 /// of stalling.
12361 ///
12362 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12363 /// and then starting and completing a `Sync`, it's then safe to send the
12364 /// `BufferCollectionToken` client ends to other participants knowing the
12365 /// server will recognize the tokens when they're sent by the other
12366 /// participants to sysmem in a
12367 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12368 /// efficient way to create tokens while avoiding unnecessary round trips.
12369 ///
12370 /// Other options include waiting for each
12371 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12372 /// individually (using separate call to `Sync` after each), or calling
12373 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12374 /// converted to a `BufferCollection` via
12375 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12376 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12377 /// the sync step and can create multiple tokens at once.
12378 pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
12379 let _response = self.client.send_query::<
12380 fidl::encoding::EmptyPayload,
12381 fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
12382 >(
12383 (),
12384 0x11ac2555cf575b54,
12385 fidl::encoding::DynamicFlags::FLEXIBLE,
12386 ___deadline,
12387 )?
12388 .into_result::<NodeMarker>("sync")?;
12389 Ok(_response)
12390 }
12391
12392 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12393 ///
12394 /// Normally a participant will convert a `BufferCollectionToken` into a
12395 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12396 /// `Release` via the token (and then close the channel immediately or
12397 /// shortly later in response to server closing the server end), which
12398 /// avoids causing buffer collection failure. Without a prior `Release`,
12399 /// closing the `BufferCollectionToken` client end will cause buffer
12400 /// collection failure.
12401 ///
12402 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12403 ///
12404 /// By default the server handles unexpected closure of a
12405 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12406 /// first) by failing the buffer collection. Partly this is to expedite
12407 /// closing VMO handles to reclaim memory when any participant fails. If a
12408 /// participant would like to cleanly close a `BufferCollection` without
12409 /// causing buffer collection failure, the participant can send `Release`
12410 /// before closing the `BufferCollection` client end. The `Release` can
12411 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12412 /// buffer collection won't require constraints from this node in order to
12413 /// allocate. If after `SetConstraints`, the constraints are retained and
12414 /// aggregated, despite the lack of `BufferCollection` connection at the
12415 /// time of constraints aggregation.
12416 ///
12417 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12418 ///
12419 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12420 /// end (without `Release` first) will trigger failure of the buffer
12421 /// collection. To close a `BufferCollectionTokenGroup` channel without
12422 /// failing the buffer collection, ensure that AllChildrenPresent() has been
12423 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12424 /// client end.
12425 ///
12426 /// If `Release` occurs before
12427 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
12428 /// buffer collection will fail (triggered by reception of `Release` without
12429 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12430 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12431 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12432 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12433 /// close requires `AllChildrenPresent` (if not already sent), then
12434 /// `Release`, then close client end.
12435 ///
12436 /// If `Release` occurs after `AllChildrenPresent`, the children and all
12437 /// their constraints remain intact (just as they would if the
12438 /// `BufferCollectionTokenGroup` channel had remained open), and the client
12439 /// end close doesn't trigger buffer collection failure.
12440 ///
12441 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12442 ///
12443 /// For brevity, the per-channel-protocol paragraphs above ignore the
12444 /// separate failure domain created by
12445 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12446 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12447 /// unexpectedly closes (without `Release` first) and that client end is
12448 /// under a failure domain, instead of failing the whole buffer collection,
12449 /// the failure domain is failed, but the buffer collection itself is
12450 /// isolated from failure of the failure domain. Such failure domains can be
12451 /// nested, in which case only the inner-most failure domain in which the
12452 /// `Node` resides fails.
12453 pub fn r#release(&self) -> Result<(), fidl::Error> {
12454 self.client.send::<fidl::encoding::EmptyPayload>(
12455 (),
12456 0x6a5cae7d6d6e04c6,
12457 fidl::encoding::DynamicFlags::FLEXIBLE,
12458 )
12459 }
12460
12461 /// Set a name for VMOs in this buffer collection.
12462 ///
12463 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
12464 /// will be truncated to fit. The name of the vmo will be suffixed with the
12465 /// buffer index within the collection (if the suffix fits within
12466 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12467 /// listed in the inspect data.
12468 ///
12469 /// The name only affects VMOs allocated after the name is set; this call
12470 /// does not rename existing VMOs. If multiple clients set different names
12471 /// then the larger priority value will win. Setting a new name with the
12472 /// same priority as a prior name doesn't change the name.
12473 ///
12474 /// All table fields are currently required.
12475 ///
12476 /// + request `priority` The name is only set if this is the first `SetName`
12477 /// or if `priority` is greater than any previous `priority` value in
12478 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
12479 /// + request `name` The name for VMOs created under this buffer collection.
12480 pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
12481 self.client.send::<NodeSetNameRequest>(
12482 payload,
12483 0xb41f1624f48c1e9,
12484 fidl::encoding::DynamicFlags::FLEXIBLE,
12485 )
12486 }
12487
12488 /// Set information about the current client that can be used by sysmem to
12489 /// help diagnose leaking memory and allocation stalls waiting for a
12490 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12491 ///
12492 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
12493 /// `Node`(s) derived from this `Node`, unless overriden by
12494 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12495 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12496 ///
12497 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12498 /// `Allocator` is the most efficient way to ensure that all
12499 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12500 /// set, and is also more efficient than separately sending the same debug
12501 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12502 /// created [`fuchsia.sysmem2/Node`].
12503 ///
12504 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12505 /// indicate which client is closing their channel first, leading to subtree
12506 /// failure (which can be normal if the purpose of the subtree is over, but
12507 /// if happening earlier than expected, the client-channel-specific name can
12508 /// help diagnose where the failure is first coming from, from sysmem's
12509 /// point of view).
12510 ///
12511 /// All table fields are currently required.
12512 ///
12513 /// + request `name` This can be an arbitrary string, but the current
12514 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
12515 /// + request `id` This can be an arbitrary id, but the current process ID
12516 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
12517 pub fn r#set_debug_client_info(
12518 &self,
12519 mut payload: &NodeSetDebugClientInfoRequest,
12520 ) -> Result<(), fidl::Error> {
12521 self.client.send::<NodeSetDebugClientInfoRequest>(
12522 payload,
12523 0x5cde8914608d99b1,
12524 fidl::encoding::DynamicFlags::FLEXIBLE,
12525 )
12526 }
12527
12528 /// Sysmem logs a warning if sysmem hasn't seen
12529 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12530 /// within 5 seconds after creation of a new collection.
12531 ///
12532 /// Clients can call this method to change when the log is printed. If
12533 /// multiple client set the deadline, it's unspecified which deadline will
12534 /// take effect.
12535 ///
12536 /// In most cases the default works well.
12537 ///
12538 /// All table fields are currently required.
12539 ///
12540 /// + request `deadline` The time at which sysmem will start trying to log
12541 /// the warning, unless all constraints are with sysmem by then.
12542 pub fn r#set_debug_timeout_log_deadline(
12543 &self,
12544 mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
12545 ) -> Result<(), fidl::Error> {
12546 self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
12547 payload,
12548 0x716b0af13d5c0806,
12549 fidl::encoding::DynamicFlags::FLEXIBLE,
12550 )
12551 }
12552
12553 /// This enables verbose logging for the buffer collection.
12554 ///
12555 /// Verbose logging includes constraints set via
12556 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
12557 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
12558 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
12559 /// the tree of `Node`(s).
12560 ///
12561 /// Normally sysmem prints only a single line complaint when aggregation
12562 /// fails, with just the specific detailed reason that aggregation failed,
12563 /// with little surrounding context. While this is often enough to diagnose
12564 /// a problem if only a small change was made and everything was working
12565 /// before the small change, it's often not particularly helpful for getting
12566 /// a new buffer collection to work for the first time. Especially with
12567 /// more complex trees of nodes, involving things like
12568 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
12569 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
12570 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
12571 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
12572 /// looks like and why it's failing a logical allocation, or why a tree or
12573 /// subtree is failing sooner than expected.
12574 ///
12575 /// The intent of the extra logging is to be acceptable from a performance
12576 /// point of view, under the assumption that verbose logging is only enabled
12577 /// on a low number of buffer collections. If we're not tracking down a bug,
12578 /// we shouldn't send this message.
12579 pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
12580 self.client.send::<fidl::encoding::EmptyPayload>(
12581 (),
12582 0x5209c77415b4dfad,
12583 fidl::encoding::DynamicFlags::FLEXIBLE,
12584 )
12585 }
12586
12587 /// This gets a handle that can be used as a parameter to
12588 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
12589 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
12590 /// client obtained this handle from this `Node`.
12591 ///
12592 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
12593 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
12594 /// despite the two calls typically being on different channels.
12595 ///
12596 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
12597 ///
12598 /// All table fields are currently required.
12599 ///
12600 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
12601 /// different `Node` channel, to prove that the client obtained the handle
12602 /// from this `Node`.
12603 pub fn r#get_node_ref(
12604 &self,
12605 ___deadline: zx::MonotonicInstant,
12606 ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
12607 let _response = self.client.send_query::<
12608 fidl::encoding::EmptyPayload,
12609 fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
12610 >(
12611 (),
12612 0x5b3d0e51614df053,
12613 fidl::encoding::DynamicFlags::FLEXIBLE,
12614 ___deadline,
12615 )?
12616 .into_result::<NodeMarker>("get_node_ref")?;
12617 Ok(_response)
12618 }
12619
12620 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
12621 /// rooted at a different child token of a common parent
12622 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
12623 /// passed-in `node_ref`.
12624 ///
12625 /// This call is for assisting with admission control de-duplication, and
12626 /// with debugging.
12627 ///
12628 /// The `node_ref` must be obtained using
12629 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
12630 ///
12631 /// The `node_ref` can be a duplicated handle; it's not necessary to call
12632 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
12633 ///
12634 /// If a calling token may not actually be a valid token at all due to a
12635 /// potentially hostile/untrusted provider of the token, call
12636 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
12637 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
12638 /// never responds due to a calling token not being a real token (not really
12639 /// talking to sysmem). Another option is to call
12640 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
12641 /// which also validates the token along with converting it to a
12642 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
12643 ///
12644 /// All table fields are currently required.
12645 ///
12646 /// - response `is_alternate`
12647 /// - true: The first parent node in common between the calling node and
12648 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
12649 /// that the calling `Node` and the `node_ref` `Node` will not have both
12650 /// their constraints apply - rather sysmem will choose one or the other
12651 /// of the constraints - never both. This is because only one child of
12652 /// a `BufferCollectionTokenGroup` is selected during logical
12653 /// allocation, with only that one child's subtree contributing to
12654 /// constraints aggregation.
12655 /// - false: The first parent node in common between the calling `Node`
12656 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
12657 /// Currently, this means the first parent node in common is a
12658 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
12659 /// `Release`ed). This means that the calling `Node` and the `node_ref`
12660 /// `Node` may have both their constraints apply during constraints
12661 /// aggregation of the logical allocation, if both `Node`(s) are
12662 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
12663 /// this case, there is no `BufferCollectionTokenGroup` that will
12664 /// directly prevent the two `Node`(s) from both being selected and
12665 /// their constraints both aggregated, but even when false, one or both
12666 /// `Node`(s) may still be eliminated from consideration if one or both
12667 /// `Node`(s) has a direct or indirect parent
12668 /// `BufferCollectionTokenGroup` which selects a child subtree other
12669 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
12670 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
12671 /// associated with the same buffer collection as the calling `Node`.
12672 /// Another reason for this error is if the `node_ref` is an
12673 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
12674 /// a real `node_ref` obtained from `GetNodeRef`.
12675 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
12677 /// the needed rights expected on a real `node_ref`.
12678 /// * No other failing status codes are returned by this call. However,
12679 /// sysmem may add additional codes in future, so the client should have
12680 /// sensible default handling for any failing status code.
12681 pub fn r#is_alternate_for(
12682 &self,
12683 mut payload: NodeIsAlternateForRequest,
12684 ___deadline: zx::MonotonicInstant,
12685 ) -> Result<NodeIsAlternateForResult, fidl::Error> {
12686 let _response = self.client.send_query::<
12687 NodeIsAlternateForRequest,
12688 fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
12689 >(
12690 &mut payload,
12691 0x3a58e00157e0825,
12692 fidl::encoding::DynamicFlags::FLEXIBLE,
12693 ___deadline,
12694 )?
12695 .into_result::<NodeMarker>("is_alternate_for")?;
12696 Ok(_response.map(|x| x))
12697 }
12698
12699 /// Get the buffer collection ID. This ID is also available from
12700 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
12701 /// within the collection).
12702 ///
12703 /// This call is mainly useful in situations where we can't convey a
12704 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
12705 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
12706 /// handle, which can be joined back up with a `BufferCollection` client end
12707 /// that was created via a different path. Prefer to convey a
12708 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
12709 ///
12710 /// Trusting a `buffer_collection_id` value from a source other than sysmem
12711 /// is analogous to trusting a koid value from a source other than zircon.
12712 /// Both should be avoided unless really necessary, and both require
12713 /// caution. In some situations it may be reasonable to refer to a
12714 /// pre-established `BufferCollection` by `buffer_collection_id` via a
12715 /// protocol for efficiency reasons, but an incoming value purporting to be
12716 /// a `buffer_collection_id` is not sufficient alone to justify granting the
12717 /// sender of the `buffer_collection_id` any capability. The sender must
12718 /// first prove to a receiver that the sender has/had a VMO or has/had a
12719 /// `BufferCollectionToken` to the same collection by sending a handle that
12720 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
12721 /// `buffer_collection_id` value. The receiver should take care to avoid
12722 /// assuming that a sender had a `BufferCollectionToken` in cases where the
12723 /// sender has only proven that the sender had a VMO.
12724 ///
12725 /// - response `buffer_collection_id` This ID is unique per buffer
12726 /// collection per boot. Each buffer is uniquely identified by the
12727 /// `buffer_collection_id` and `buffer_index` together.
    pub fn r#get_buffer_collection_id(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
        // Blocking two-way call with an empty request payload; waits for the
        // response until `___deadline`.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
        >(
            (),
            0x77d19a494b78ba8c, // fidlgen ordinal for fuchsia.sysmem2/Node.GetBufferCollectionId.
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<NodeMarker>("get_buffer_collection_id")?;
        Ok(_response)
    }
12744
12745 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
12746 /// created after this message to weak, which means that a client's `Node`
12747 /// client end (or a child created after this message) is not alone
12748 /// sufficient to keep allocated VMOs alive.
12749 ///
12750 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
12751 /// `close_weak_asap`.
12752 ///
12753 /// This message is only permitted before the `Node` becomes ready for
12754 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
12755 /// * `BufferCollectionToken`: any time
12756 /// * `BufferCollection`: before `SetConstraints`
12757 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
12758 ///
12759 /// Currently, no conversion from strong `Node` to weak `Node` after ready
12760 /// for allocation is provided, but a client can simulate that by creating
12761 /// an additional `Node` before allocation and setting that additional
12762 /// `Node` to weak, and then potentially at some point later sending
12763 /// `Release` and closing the client end of the client's strong `Node`, but
12764 /// keeping the client's weak `Node`.
12765 ///
12766 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
12767 /// collection failure (all `Node` client end(s) will see
12768 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
12769 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
12770 /// this situation until all `Node`(s) are ready for allocation. For initial
12771 /// allocation to succeed, at least one strong `Node` is required to exist
12772 /// at allocation time, but after that client receives VMO handles, that
12773 /// client can `BufferCollection.Release` and close the client end without
12774 /// causing this type of failure.
12775 ///
12776 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
12777 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
12778 /// separately as appropriate.
    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
        // One-way message with an empty payload; the `Result` is the immediate
        // send outcome, as no server reply exists for this method.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1, // fidlgen ordinal for fuchsia.sysmem2/Node.SetWeak.
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12786
12787 /// This indicates to sysmem that the client is prepared to pay attention to
12788 /// `close_weak_asap`.
12789 ///
12790 /// If sent, this message must be before
12791 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
12792 ///
12793 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
12794 /// send this message before `WaitForAllBuffersAllocated`, or a parent
12795 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
12796 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
12797 /// trigger buffer collection failure.
12798 ///
12799 /// This message is necessary because weak sysmem VMOs have not always been
12800 /// a thing, so older clients are not aware of the need to pay attention to
12801 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
12802 /// sysmem weak VMO handles asap. By having this message and requiring
12803 /// participants to indicate their acceptance of this aspect of the overall
12804 /// protocol, we avoid situations where an older client is delivered a weak
12805 /// VMO without any way for sysmem to get that VMO to close quickly later
12806 /// (and on a per-buffer basis).
12807 ///
12808 /// A participant that doesn't handle `close_weak_asap` and also doesn't
12809 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
12810 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
12811 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
12812 /// same participant has a child/delegate which does retrieve VMOs, that
12813 /// child/delegate will need to send `SetWeakOk` before
12814 /// `WaitForAllBuffersAllocated`.
12815 ///
12816 /// + request `for_child_nodes_also` If present and true, this means direct
12817 /// child nodes of this node created after this message plus all
12818 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
12819 /// those nodes. Any child node of this node that was created before this
12820 /// message is not included. This setting is "sticky" in the sense that a
12821 /// subsequent `SetWeakOk` without this bool set to true does not reset
12822 /// the server-side bool. If this creates a problem for a participant, a
12823 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
12824 /// tokens instead, as appropriate. A participant should only set
12825 /// `for_child_nodes_also` true if the participant can really promise to
12826 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
12827 /// weak VMO handles held by participants holding the corresponding child
12828 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
12829 /// which are using sysmem(1) can be weak, despite the clients of those
12830 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
12831 /// direct way to find out about `close_weak_asap`. This only applies to
12832 /// descendents of this `Node` which are using sysmem(1), not to this
12833 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
12834 /// token, which will fail allocation unless an ancestor of this `Node`
12835 /// specified `for_child_nodes_also` true.
    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        // One-way message; the request table is consumed and encoded in place.
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9, // fidlgen ordinal for fuchsia.sysmem2/Node.SetWeakOk.
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12843
12844 /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
12846 /// reservation by a different `Node` via
12847 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
12848 ///
12849 /// The `Node` buffer counts may not be released until the entire tree of
12850 /// `Node`(s) is closed or failed, because
12851 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
12852 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
12853 /// `Node` buffer counts remain reserved until the orphaned node is later
12854 /// cleaned up.
12855 ///
12856 /// If the `Node` exceeds a fairly large number of attached eventpair server
12857 /// ends, a log message will indicate this and the `Node` (and the
12858 /// appropriate) sub-tree will fail.
12859 ///
12860 /// The `server_end` will remain open when
12861 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
12862 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
12863 /// [`fuchsia.sysmem2/BufferCollection`].
12864 ///
12865 /// This message can also be used with a
12866 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; the request (carrying the tracking eventpair server
        // end) is consumed and encoded in place.
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac, // fidlgen ordinal for fuchsia.sysmem2/Node.AttachNodeTracking.
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
12877}
12878
#[cfg(target_os = "fuchsia")]
impl From<NodeSynchronousProxy> for zx::Handle {
    /// Consumes the proxy and yields its underlying channel as a generic
    /// Zircon handle.
    fn from(value: NodeSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
12885
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for NodeSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for fuchsia.sysmem2/Node.
    fn from(value: fidl::Channel) -> Self {
        NodeSynchronousProxy::new(value)
    }
}
12892
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for NodeSynchronousProxy {
    type Protocol = NodeMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<NodeMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
12901
/// Asynchronous client proxy for the fuchsia.sysmem2/Node protocol.
#[derive(Debug, Clone)]
pub struct NodeProxy {
    // Underlying FIDL client that encodes and sends messages on the channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
12906
12907impl fidl::endpoints::Proxy for NodeProxy {
12908 type Protocol = NodeMarker;
12909
12910 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
12911 Self::new(inner)
12912 }
12913
12914 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
12915 self.client.into_channel().map_err(|client| Self { client })
12916 }
12917
12918 fn as_channel(&self) -> &::fidl::AsyncChannel {
12919 self.client.as_channel()
12920 }
12921}
12922
12923impl NodeProxy {
12924 /// Create a new Proxy for fuchsia.sysmem2/Node.
12925 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
12926 let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12927 Self { client: fidl::client::Client::new(channel, protocol_name) }
12928 }
12929
12930 /// Get a Stream of events from the remote end of the protocol.
12931 ///
12932 /// # Panics
12933 ///
12934 /// Panics if the event stream was already taken.
12935 pub fn take_event_stream(&self) -> NodeEventStream {
12936 NodeEventStream { event_receiver: self.client.take_event_receiver() }
12937 }
12938
12939 /// Ensure that previous messages have been received server side. This is
12940 /// particularly useful after previous messages that created new tokens,
12941 /// because a token must be known to the sysmem server before sending the
12942 /// token to another participant.
12943 ///
12944 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12945 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12946 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12947 /// to mitigate the possibility of a hostile/fake
12948 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12949 /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
12951 /// the token as part of exchanging it for a
12952 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12953 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12954 /// of stalling.
12955 ///
12956 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12957 /// and then starting and completing a `Sync`, it's then safe to send the
12958 /// `BufferCollectionToken` client ends to other participants knowing the
12959 /// server will recognize the tokens when they're sent by the other
12960 /// participants to sysmem in a
12961 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12962 /// efficient way to create tokens while avoiding unnecessary round trips.
12963 ///
12964 /// Other options include waiting for each
12965 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12966 /// individually (using separate call to `Sync` after each), or calling
12967 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12968 /// converted to a `BufferCollection` via
12969 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12970 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12971 /// the sync step and can create multiple tokens at once.
    pub fn r#sync(
        &self,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Two-way call; delegates to the `NodeProxyInterface` implementation.
        // The returned future resolves once the server's (empty) reply arrives.
        NodeProxyInterface::r#sync(self)
    }
12977
12978 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12979 ///
12980 /// Normally a participant will convert a `BufferCollectionToken` into a
12981 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12982 /// `Release` via the token (and then close the channel immediately or
12983 /// shortly later in response to server closing the server end), which
12984 /// avoids causing buffer collection failure. Without a prior `Release`,
12985 /// closing the `BufferCollectionToken` client end will cause buffer
12986 /// collection failure.
12987 ///
12988 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12989 ///
12990 /// By default the server handles unexpected closure of a
12991 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12992 /// first) by failing the buffer collection. Partly this is to expedite
12993 /// closing VMO handles to reclaim memory when any participant fails. If a
12994 /// participant would like to cleanly close a `BufferCollection` without
12995 /// causing buffer collection failure, the participant can send `Release`
12996 /// before closing the `BufferCollection` client end. The `Release` can
12997 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12998 /// buffer collection won't require constraints from this node in order to
12999 /// allocate. If after `SetConstraints`, the constraints are retained and
13000 /// aggregated, despite the lack of `BufferCollection` connection at the
13001 /// time of constraints aggregation.
13002 ///
13003 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13004 ///
13005 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13006 /// end (without `Release` first) will trigger failure of the buffer
13007 /// collection. To close a `BufferCollectionTokenGroup` channel without
13008 /// failing the buffer collection, ensure that AllChildrenPresent() has been
13009 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13010 /// client end.
13011 ///
13012 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13014 /// buffer collection will fail (triggered by reception of `Release` without
13015 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13016 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13017 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13018 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13019 /// close requires `AllChildrenPresent` (if not already sent), then
13020 /// `Release`, then close client end.
13021 ///
13022 /// If `Release` occurs after `AllChildrenPresent`, the children and all
13023 /// their constraints remain intact (just as they would if the
13024 /// `BufferCollectionTokenGroup` channel had remained open), and the client
13025 /// end close doesn't trigger buffer collection failure.
13026 ///
13027 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13028 ///
13029 /// For brevity, the per-channel-protocol paragraphs above ignore the
13030 /// separate failure domain created by
13031 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13032 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13033 /// unexpectedly closes (without `Release` first) and that client end is
13034 /// under a failure domain, instead of failing the whole buffer collection,
13035 /// the failure domain is failed, but the buffer collection itself is
13036 /// isolated from failure of the failure domain. Such failure domains can be
13037 /// nested, in which case only the inner-most failure domain in which the
13038 /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way message: the return value is the immediate send result, not
        // a server reply (note: no response future in the signature).
        NodeProxyInterface::r#release(self)
    }
13042
13043 /// Set a name for VMOs in this buffer collection.
13044 ///
13045 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
13046 /// will be truncated to fit. The name of the vmo will be suffixed with the
13047 /// buffer index within the collection (if the suffix fits within
13048 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
13049 /// listed in the inspect data.
13050 ///
13051 /// The name only affects VMOs allocated after the name is set; this call
13052 /// does not rename existing VMOs. If multiple clients set different names
13053 /// then the larger priority value will win. Setting a new name with the
13054 /// same priority as a prior name doesn't change the name.
13055 ///
13056 /// All table fields are currently required.
13057 ///
13058 /// + request `priority` The name is only set if this is the first `SetName`
13059 /// or if `priority` is greater than any previous `priority` value in
13060 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
13061 /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way message; `payload` is borrowed only for encoding.
        NodeProxyInterface::r#set_name(self, payload)
    }
13065
13066 /// Set information about the current client that can be used by sysmem to
13067 /// help diagnose leaking memory and allocation stalls waiting for a
13068 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
13069 ///
13070 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
13072 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
13073 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
13074 ///
13075 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
13076 /// `Allocator` is the most efficient way to ensure that all
13077 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
13078 /// set, and is also more efficient than separately sending the same debug
13079 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
13080 /// created [`fuchsia.sysmem2/Node`].
13081 ///
13082 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
13083 /// indicate which client is closing their channel first, leading to subtree
13084 /// failure (which can be normal if the purpose of the subtree is over, but
13085 /// if happening earlier than expected, the client-channel-specific name can
13086 /// help diagnose where the failure is first coming from, from sysmem's
13087 /// point of view).
13088 ///
13089 /// All table fields are currently required.
13090 ///
13091 /// + request `name` This can be an arbitrary string, but the current
13092 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
13093 /// + request `id` This can be an arbitrary id, but the current process ID
13094 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; no response future is produced, so the `Result`
        // reflects only the send attempt.
        NodeProxyInterface::r#set_debug_client_info(self, payload)
    }
13101
13102 /// Sysmem logs a warning if sysmem hasn't seen
13103 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
13104 /// within 5 seconds after creation of a new collection.
13105 ///
13106 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
13108 /// take effect.
13109 ///
13110 /// In most cases the default works well.
13111 ///
13112 /// All table fields are currently required.
13113 ///
13114 /// + request `deadline` The time at which sysmem will start trying to log
13115 /// the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way message; `payload` is borrowed only for encoding.
        NodeProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
    }
13122
13123 /// This enables verbose logging for the buffer collection.
13124 ///
13125 /// Verbose logging includes constraints set via
13126 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13127 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13128 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13129 /// the tree of `Node`(s).
13130 ///
13131 /// Normally sysmem prints only a single line complaint when aggregation
13132 /// fails, with just the specific detailed reason that aggregation failed,
13133 /// with little surrounding context. While this is often enough to diagnose
13134 /// a problem if only a small change was made and everything was working
13135 /// before the small change, it's often not particularly helpful for getting
13136 /// a new buffer collection to work for the first time. Especially with
13137 /// more complex trees of nodes, involving things like
13138 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13139 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13140 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13141 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13142 /// looks like and why it's failing a logical allocation, or why a tree or
13143 /// subtree is failing sooner than expected.
13144 ///
13145 /// The intent of the extra logging is to be acceptable from a performance
13146 /// point of view, under the assumption that verbose logging is only enabled
13147 /// on a low number of buffer collections. If we're not tracking down a bug,
13148 /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way message with no payload; the `Result` is the send outcome.
        NodeProxyInterface::r#set_verbose_logging(self)
    }
13152
13153 /// This gets a handle that can be used as a parameter to
13154 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13155 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13156 /// client obtained this handle from this `Node`.
13157 ///
13158 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13159 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13160 /// despite the two calls typically being on different channels.
13161 ///
13162 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13163 ///
13164 /// All table fields are currently required.
13165 ///
13166 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13167 /// different `Node` channel, to prove that the client obtained the handle
13168 /// from this `Node`.
    pub fn r#get_node_ref(
        &self,
    ) -> fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; the returned future resolves with the server's
        // `NodeGetNodeRefResponse`.
        NodeProxyInterface::r#get_node_ref(self)
    }
13177
13178 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
13179 /// rooted at a different child token of a common parent
13180 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
13181 /// passed-in `node_ref`.
13182 ///
13183 /// This call is for assisting with admission control de-duplication, and
13184 /// with debugging.
13185 ///
13186 /// The `node_ref` must be obtained using
13187 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
13188 ///
13189 /// The `node_ref` can be a duplicated handle; it's not necessary to call
13190 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
13191 ///
13192 /// If a calling token may not actually be a valid token at all due to a
13193 /// potentially hostile/untrusted provider of the token, call
13194 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
13195 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
13196 /// never responds due to a calling token not being a real token (not really
13197 /// talking to sysmem). Another option is to call
13198 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
13199 /// which also validates the token along with converting it to a
13200 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
13201 ///
13202 /// All table fields are currently required.
13203 ///
13204 /// - response `is_alternate`
13205 /// - true: The first parent node in common between the calling node and
13206 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
13207 /// that the calling `Node` and the `node_ref` `Node` will not have both
13208 /// their constraints apply - rather sysmem will choose one or the other
13209 /// of the constraints - never both. This is because only one child of
13210 /// a `BufferCollectionTokenGroup` is selected during logical
13211 /// allocation, with only that one child's subtree contributing to
13212 /// constraints aggregation.
13213 /// - false: The first parent node in common between the calling `Node`
13214 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
13215 /// Currently, this means the first parent node in common is a
13216 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
13217 /// `Release`ed). This means that the calling `Node` and the `node_ref`
13218 /// `Node` may have both their constraints apply during constraints
13219 /// aggregation of the logical allocation, if both `Node`(s) are
13220 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
13221 /// this case, there is no `BufferCollectionTokenGroup` that will
13222 /// directly prevent the two `Node`(s) from both being selected and
13223 /// their constraints both aggregated, but even when false, one or both
13224 /// `Node`(s) may still be eliminated from consideration if one or both
13225 /// `Node`(s) has a direct or indirect parent
13226 /// `BufferCollectionTokenGroup` which selects a child subtree other
13227 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
13228 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
13229 /// associated with the same buffer collection as the calling `Node`.
13230 /// Another reason for this error is if the `node_ref` is an
13231 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
13232 /// a real `node_ref` obtained from `GetNodeRef`.
13233 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
13235 /// the needed rights expected on a real `node_ref`.
13236 /// * No other failing status codes are returned by this call. However,
13237 /// sysmem may add additional codes in future, so the client should have
13238 /// sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call; the returned future resolves with the server's
        // `NodeIsAlternateForResult`.
        NodeProxyInterface::r#is_alternate_for(self, payload)
    }
13248
13249 /// Get the buffer collection ID. This ID is also available from
13250 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
13251 /// within the collection).
13252 ///
13253 /// This call is mainly useful in situations where we can't convey a
13254 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
13255 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
13256 /// handle, which can be joined back up with a `BufferCollection` client end
13257 /// that was created via a different path. Prefer to convey a
13258 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
13259 ///
13260 /// Trusting a `buffer_collection_id` value from a source other than sysmem
13261 /// is analogous to trusting a koid value from a source other than zircon.
13262 /// Both should be avoided unless really necessary, and both require
13263 /// caution. In some situations it may be reasonable to refer to a
13264 /// pre-established `BufferCollection` by `buffer_collection_id` via a
13265 /// protocol for efficiency reasons, but an incoming value purporting to be
13266 /// a `buffer_collection_id` is not sufficient alone to justify granting the
13267 /// sender of the `buffer_collection_id` any capability. The sender must
13268 /// first prove to a receiver that the sender has/had a VMO or has/had a
13269 /// `BufferCollectionToken` to the same collection by sending a handle that
13270 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
13271 /// `buffer_collection_id` value. The receiver should take care to avoid
13272 /// assuming that a sender had a `BufferCollectionToken` in cases where the
13273 /// sender has only proven that the sender had a VMO.
13274 ///
13275 /// - response `buffer_collection_id` This ID is unique per buffer
13276 /// collection per boot. Each buffer is uniquely identified by the
13277 /// `buffer_collection_id` and `buffer_index` together.
13278 pub fn r#get_buffer_collection_id(
13279 &self,
13280 ) -> fidl::client::QueryResponseFut<
13281 NodeGetBufferCollectionIdResponse,
13282 fidl::encoding::DefaultFuchsiaResourceDialect,
13283 > {
13284 NodeProxyInterface::r#get_buffer_collection_id(self)
13285 }
13286
13287 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
13288 /// created after this message to weak, which means that a client's `Node`
13289 /// client end (or a child created after this message) is not alone
13290 /// sufficient to keep allocated VMOs alive.
13291 ///
13292 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
13293 /// `close_weak_asap`.
13294 ///
13295 /// This message is only permitted before the `Node` becomes ready for
13296 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
13297 /// * `BufferCollectionToken`: any time
13298 /// * `BufferCollection`: before `SetConstraints`
13299 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
13300 ///
13301 /// Currently, no conversion from strong `Node` to weak `Node` after ready
13302 /// for allocation is provided, but a client can simulate that by creating
13303 /// an additional `Node` before allocation and setting that additional
13304 /// `Node` to weak, and then potentially at some point later sending
13305 /// `Release` and closing the client end of the client's strong `Node`, but
13306 /// keeping the client's weak `Node`.
13307 ///
13308 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
13309 /// collection failure (all `Node` client end(s) will see
13310 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
13311 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
13312 /// this situation until all `Node`(s) are ready for allocation. For initial
13313 /// allocation to succeed, at least one strong `Node` is required to exist
13314 /// at allocation time, but after that client receives VMO handles, that
13315 /// client can `BufferCollection.Release` and close the client end without
13316 /// causing this type of failure.
13317 ///
13318 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
13319 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
13320 /// separately as appropriate.
13321 pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
13322 NodeProxyInterface::r#set_weak(self)
13323 }
13324
13325 /// This indicates to sysmem that the client is prepared to pay attention to
13326 /// `close_weak_asap`.
13327 ///
13328 /// If sent, this message must be before
13329 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
13330 ///
13331 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
13332 /// send this message before `WaitForAllBuffersAllocated`, or a parent
13333 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
13334 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
13335 /// trigger buffer collection failure.
13336 ///
13337 /// This message is necessary because weak sysmem VMOs have not always been
13338 /// a thing, so older clients are not aware of the need to pay attention to
13339 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
13340 /// sysmem weak VMO handles asap. By having this message and requiring
13341 /// participants to indicate their acceptance of this aspect of the overall
13342 /// protocol, we avoid situations where an older client is delivered a weak
13343 /// VMO without any way for sysmem to get that VMO to close quickly later
13344 /// (and on a per-buffer basis).
13345 ///
13346 /// A participant that doesn't handle `close_weak_asap` and also doesn't
13347 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
13348 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
13349 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
13350 /// same participant has a child/delegate which does retrieve VMOs, that
13351 /// child/delegate will need to send `SetWeakOk` before
13352 /// `WaitForAllBuffersAllocated`.
13353 ///
13354 /// + request `for_child_nodes_also` If present and true, this means direct
13355 /// child nodes of this node created after this message plus all
13356 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
13357 /// those nodes. Any child node of this node that was created before this
13358 /// message is not included. This setting is "sticky" in the sense that a
13359 /// subsequent `SetWeakOk` without this bool set to true does not reset
13360 /// the server-side bool. If this creates a problem for a participant, a
13361 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
13362 /// tokens instead, as appropriate. A participant should only set
13363 /// `for_child_nodes_also` true if the participant can really promise to
13364 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
13365 /// weak VMO handles held by participants holding the corresponding child
13366 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
13367 /// which are using sysmem(1) can be weak, despite the clients of those
13368 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
13369 /// direct way to find out about `close_weak_asap`. This only applies to
13370 /// descendents of this `Node` which are using sysmem(1), not to this
13371 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
13372 /// token, which will fail allocation unless an ancestor of this `Node`
13373 /// specified `for_child_nodes_also` true.
13374 pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
13375 NodeProxyInterface::r#set_weak_ok(self, payload)
13376 }
13377
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
13380 /// reservation by a different `Node` via
13381 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
13382 ///
13383 /// The `Node` buffer counts may not be released until the entire tree of
13384 /// `Node`(s) is closed or failed, because
13385 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
13386 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
13387 /// `Node` buffer counts remain reserved until the orphaned node is later
13388 /// cleaned up.
13389 ///
13390 /// If the `Node` exceeds a fairly large number of attached eventpair server
13391 /// ends, a log message will indicate this and the `Node` (and the
13392 /// appropriate) sub-tree will fail.
13393 ///
13394 /// The `server_end` will remain open when
13395 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
13396 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
13397 /// [`fuchsia.sysmem2/BufferCollection`].
13398 ///
13399 /// This message can also be used with a
13400 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
13401 pub fn r#attach_node_tracking(
13402 &self,
13403 mut payload: NodeAttachNodeTrackingRequest,
13404 ) -> Result<(), fidl::Error> {
13405 NodeProxyInterface::r#attach_node_tracking(self, payload)
13406 }
13407}
13408
// Client-side wire implementation of the `Node` protocol.
//
// Each two-way method defines a local `_decode` helper that parses the
// response envelope (flexible framing is unwrapped by `into_result`) and
// hands it to `send_query_and_decode`; one-way methods encode and send their
// payload with `send`. The hex literals are the FIDL method ordinals and must
// match the values used by the server-side dispatch in `NodeRequestStream`.
impl NodeProxyInterface for NodeProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync`: empty request, empty response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<NodeMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Release`: no payload.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetName`: payload passed by reference.
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugClientInfo`: payload passed by reference.
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugTimeoutLogDeadline`: payload passed by reference.
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetVerboseLogging`: no payload.
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef`: empty request, typed table response.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<NodeMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way fallible `IsAlternateFor`: decodes a result union of either the
    // response table or the domain `Error`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<NodeMarker>("is_alternate_for")?;
            // Identity map emitted by the generator; preserved verbatim.
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId`: empty request, typed table response.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<NodeMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `SetWeak`: no payload.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetWeakOk`: payload taken by value, unlike the `&`-payload
    // setters above.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachNodeTracking`: payload taken by value.
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
13583
/// Stream of events arriving on a `Node` client channel.
pub struct NodeEventStream {
    // Yields raw message buffers from the channel; each buffer is decoded
    // into a `NodeEvent` by this type's `Stream::poll_next` impl.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
13587
13588impl std::marker::Unpin for NodeEventStream {}
13589
impl futures::stream::FusedStream for NodeEventStream {
    // Terminated exactly when the underlying event receiver is terminated.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
13595
13596impl futures::Stream for NodeEventStream {
13597 type Item = Result<NodeEvent, fidl::Error>;
13598
13599 fn poll_next(
13600 mut self: std::pin::Pin<&mut Self>,
13601 cx: &mut std::task::Context<'_>,
13602 ) -> std::task::Poll<Option<Self::Item>> {
13603 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
13604 &mut self.event_receiver,
13605 cx
13606 )?) {
13607 Some(buf) => std::task::Poll::Ready(Some(NodeEvent::decode(buf))),
13608 None => std::task::Poll::Ready(None),
13609 }
13610 }
13611}
13612
/// An event received over the fuchsia.sysmem2/Node protocol. Only unknown
/// (flexible) events are represented here, matching the variants visible in
/// this generated enum.
#[derive(Debug)]
pub enum NodeEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
13621
13622impl NodeEvent {
13623 /// Decodes a message buffer as a [`NodeEvent`].
13624 fn decode(
13625 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
13626 ) -> Result<NodeEvent, fidl::Error> {
13627 let (bytes, _handles) = buf.split_mut();
13628 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13629 debug_assert_eq!(tx_header.tx_id, 0);
13630 match tx_header.ordinal {
13631 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
13632 Ok(NodeEvent::_UnknownEvent { ordinal: tx_header.ordinal })
13633 }
13634 _ => Err(fidl::Error::UnknownOrdinal {
13635 ordinal: tx_header.ordinal,
13636 protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13637 }),
13638 }
13639 }
13640}
13641
/// A Stream of incoming requests for fuchsia.sysmem2/Node.
pub struct NodeRequestStream {
    // Shared server state (channel plus shutdown bookkeeping), also handed
    // out to control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closed or shutdown was observed; polling past this
    // point panics (see the `Stream` impl).
    is_terminated: bool,
}
13647
13648impl std::marker::Unpin for NodeRequestStream {}
13649
impl futures::stream::FusedStream for NodeRequestStream {
    // Mirrors the flag maintained by `poll_next`; once true, the stream will
    // never yield another item.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
13655
13656impl fidl::endpoints::RequestStream for NodeRequestStream {
13657 type Protocol = NodeMarker;
13658 type ControlHandle = NodeControlHandle;
13659
13660 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
13661 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
13662 }
13663
13664 fn control_handle(&self) -> Self::ControlHandle {
13665 NodeControlHandle { inner: self.inner.clone() }
13666 }
13667
13668 fn into_inner(
13669 self,
13670 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
13671 {
13672 (self.inner, self.is_terminated)
13673 }
13674
13675 fn from_inner(
13676 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
13677 is_terminated: bool,
13678 ) -> Self {
13679 Self { inner, is_terminated }
13680 }
13681}
13682
// Server-side message pump for the `Node` protocol.
//
// Each `poll_next` call reads at most one message off the channel, decodes
// its transaction header, and dispatches on the method ordinal to produce
// the matching `NodeRequest` variant. Ordinals here must stay in sync with
// the client side in the `NodeProxyInterface` impl.
impl futures::Stream for NodeRequestStream {
    type Item = Result<NodeRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a requested shutdown before touching the channel.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled NodeRequestStream after completion");
        }
        // Borrow thread-local scratch buffers for the read + decode.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal. Each arm validates the
                // one-way/two-way tx-id convention, decodes the request
                // payload, and packages a control handle (plus, for two-way
                // methods, a responder carrying the transaction id).
                std::task::Poll::Ready(Some(match header.ordinal {
                    // Sync (two-way)
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::Sync {
                            responder: NodeSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Release (one-way)
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::Release { control_handle })
                    }
                    // SetName (one-way)
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetNameRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetName { payload: req, control_handle })
                    }
                    // SetDebugClientInfo (one-way)
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // SetDebugTimeoutLogDeadline (one-way)
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugTimeoutLogDeadlineRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetDebugTimeoutLogDeadline { payload: req, control_handle })
                    }
                    // SetVerboseLogging (one-way)
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetVerboseLogging { control_handle })
                    }
                    // GetNodeRef (two-way)
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::GetNodeRef {
                            responder: NodeGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // IsAlternateFor (two-way)
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            NodeIsAlternateForRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::IsAlternateFor {
                            payload: req,
                            responder: NodeIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetBufferCollectionId (two-way)
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::GetBufferCollectionId {
                            responder: NodeGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetWeak (one-way)
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetWeak { control_handle })
                    }
                    // SetWeakOk (one-way)
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetWeakOkRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::SetWeakOk { payload: req, control_handle })
                    }
                    // AttachNodeTracking (one-way)
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeAttachNodeTrackingRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
                        Ok(NodeRequest::AttachNodeTracking { payload: req, control_handle })
                    }
                    // Unknown flexible one-way method: surface it to the server.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(NodeRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: NodeControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // error, then surface it to the server.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(NodeRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: NodeControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
13896
13897/// This protocol is the parent protocol for all nodes in the tree established
13898/// by [`fuchsia.sysmem2/BufferCollectionToken`] creation and
13899/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] creation, including
13900/// [`fuchsia.sysmem2/BufferCollectionToken`](s) which have since been converted
13901/// to a [`fuchsia.sysmem2/BufferCollection`] channel.
13902///
13903/// Epitaphs are not used in this protocol.
13904#[derive(Debug)]
13905pub enum NodeRequest {
13906 /// Ensure that previous messages have been received server side. This is
13907 /// particularly useful after previous messages that created new tokens,
13908 /// because a token must be known to the sysmem server before sending the
13909 /// token to another participant.
13910 ///
13911 /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
13912 /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
13913 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
13914 /// to mitigate the possibility of a hostile/fake
13915 /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
13916 /// Another way is to pass the token to
13917 /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
13918 /// the token as part of exchanging it for a
13919 /// [`fuchsia.sysmem2/BufferCollection`] channel, and
13920 /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
13921 /// of stalling.
13922 ///
13923 /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
13924 /// and then starting and completing a `Sync`, it's then safe to send the
13925 /// `BufferCollectionToken` client ends to other participants knowing the
13926 /// server will recognize the tokens when they're sent by the other
13927 /// participants to sysmem in a
13928 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
13929 /// efficient way to create tokens while avoiding unnecessary round trips.
13930 ///
13931 /// Other options include waiting for each
13932 /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
13933 /// individually (using separate call to `Sync` after each), or calling
13934 /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
13935 /// converted to a `BufferCollection` via
13936 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
13937 /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
13938 /// the sync step and can create multiple tokens at once.
13939 Sync { responder: NodeSyncResponder },
13940 /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
13941 ///
13942 /// Normally a participant will convert a `BufferCollectionToken` into a
13943 /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
13944 /// `Release` via the token (and then close the channel immediately or
13945 /// shortly later in response to server closing the server end), which
13946 /// avoids causing buffer collection failure. Without a prior `Release`,
13947 /// closing the `BufferCollectionToken` client end will cause buffer
13948 /// collection failure.
13949 ///
13950 /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
13951 ///
13952 /// By default the server handles unexpected closure of a
13953 /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
13954 /// first) by failing the buffer collection. Partly this is to expedite
13955 /// closing VMO handles to reclaim memory when any participant fails. If a
13956 /// participant would like to cleanly close a `BufferCollection` without
13957 /// causing buffer collection failure, the participant can send `Release`
13958 /// before closing the `BufferCollection` client end. The `Release` can
13959 /// occur before or after `SetConstraints`. If before `SetConstraints`, the
13960 /// buffer collection won't require constraints from this node in order to
13961 /// allocate. If after `SetConstraints`, the constraints are retained and
13962 /// aggregated, despite the lack of `BufferCollection` connection at the
13963 /// time of constraints aggregation.
13964 ///
13965 /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
13966 ///
13967 /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
13968 /// end (without `Release` first) will trigger failure of the buffer
13969 /// collection. To close a `BufferCollectionTokenGroup` channel without
13970 /// failing the buffer collection, ensure that AllChildrenPresent() has been
13971 /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
13972 /// client end.
13973 ///
13974 /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
13976 /// buffer collection will fail (triggered by reception of `Release` without
13977 /// prior `AllChildrenPresent`). This is intentionally not analogous to how
13978 /// [`fuchsia.sysmem2/BufferCollection.Release`] without
13979 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
13980 /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
13981 /// close requires `AllChildrenPresent` (if not already sent), then
13982 /// `Release`, then close client end.
13983 ///
13984 /// If `Release` occurs after `AllChildrenPresent`, the children and all
13985 /// their constraints remain intact (just as they would if the
13986 /// `BufferCollectionTokenGroup` channel had remained open), and the client
13987 /// end close doesn't trigger buffer collection failure.
13988 ///
13989 /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
13990 ///
13991 /// For brevity, the per-channel-protocol paragraphs above ignore the
13992 /// separate failure domain created by
13993 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
13994 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
13995 /// unexpectedly closes (without `Release` first) and that client end is
13996 /// under a failure domain, instead of failing the whole buffer collection,
13997 /// the failure domain is failed, but the buffer collection itself is
13998 /// isolated from failure of the failure domain. Such failure domains can be
13999 /// nested, in which case only the inner-most failure domain in which the
14000 /// `Node` resides fails.
14001 Release { control_handle: NodeControlHandle },
14002 /// Set a name for VMOs in this buffer collection.
14003 ///
14004 /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
14005 /// will be truncated to fit. The name of the vmo will be suffixed with the
14006 /// buffer index within the collection (if the suffix fits within
14007 /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
14008 /// listed in the inspect data.
14009 ///
14010 /// The name only affects VMOs allocated after the name is set; this call
14011 /// does not rename existing VMOs. If multiple clients set different names
14012 /// then the larger priority value will win. Setting a new name with the
14013 /// same priority as a prior name doesn't change the name.
14014 ///
14015 /// All table fields are currently required.
14016 ///
14017 /// + request `priority` The name is only set if this is the first `SetName`
14018 /// or if `priority` is greater than any previous `priority` value in
14019 /// prior `SetName` calls across all `Node`(s) of this buffer collection.
14020 /// + request `name` The name for VMOs created under this buffer collection.
14021 SetName { payload: NodeSetNameRequest, control_handle: NodeControlHandle },
14022 /// Set information about the current client that can be used by sysmem to
14023 /// help diagnose leaking memory and allocation stalls waiting for a
14024 /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
14025 ///
14026 /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
14028 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
14029 /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
14030 ///
14031 /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
14032 /// `Allocator` is the most efficient way to ensure that all
14033 /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
14034 /// set, and is also more efficient than separately sending the same debug
14035 /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
14036 /// created [`fuchsia.sysmem2/Node`].
14037 ///
14038 /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
14039 /// indicate which client is closing their channel first, leading to subtree
14040 /// failure (which can be normal if the purpose of the subtree is over, but
14041 /// if happening earlier than expected, the client-channel-specific name can
14042 /// help diagnose where the failure is first coming from, from sysmem's
14043 /// point of view).
14044 ///
14045 /// All table fields are currently required.
14046 ///
14047 /// + request `name` This can be an arbitrary string, but the current
14048 /// process name (see `fsl::GetCurrentProcessName`) is a good default.
14049 /// + request `id` This can be an arbitrary id, but the current process ID
14050 /// (see `fsl::GetCurrentProcessKoid`) is a good default.
14051 SetDebugClientInfo { payload: NodeSetDebugClientInfoRequest, control_handle: NodeControlHandle },
14052 /// Sysmem logs a warning if sysmem hasn't seen
14053 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
14054 /// within 5 seconds after creation of a new collection.
14055 ///
14056 /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
14058 /// take effect.
14059 ///
14060 /// In most cases the default works well.
14061 ///
14062 /// All table fields are currently required.
14063 ///
14064 /// + request `deadline` The time at which sysmem will start trying to log
14065 /// the warning, unless all constraints are with sysmem by then.
14066 SetDebugTimeoutLogDeadline {
14067 payload: NodeSetDebugTimeoutLogDeadlineRequest,
14068 control_handle: NodeControlHandle,
14069 },
14070 /// This enables verbose logging for the buffer collection.
14071 ///
14072 /// Verbose logging includes constraints set via
14073 /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
14074 /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
14075 /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
14076 /// the tree of `Node`(s).
14077 ///
14078 /// Normally sysmem prints only a single line complaint when aggregation
14079 /// fails, with just the specific detailed reason that aggregation failed,
14080 /// with little surrounding context. While this is often enough to diagnose
14081 /// a problem if only a small change was made and everything was working
14082 /// before the small change, it's often not particularly helpful for getting
14083 /// a new buffer collection to work for the first time. Especially with
14084 /// more complex trees of nodes, involving things like
14085 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
14086 /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
14087 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
14088 /// subtrees of nodes, verbose logging may help in diagnosing what the tree
14089 /// looks like and why it's failing a logical allocation, or why a tree or
14090 /// subtree is failing sooner than expected.
14091 ///
14092 /// The intent of the extra logging is to be acceptable from a performance
14093 /// point of view, under the assumption that verbose logging is only enabled
14094 /// on a low number of buffer collections. If we're not tracking down a bug,
14095 /// we shouldn't send this message.
14096 SetVerboseLogging { control_handle: NodeControlHandle },
14097 /// This gets a handle that can be used as a parameter to
14098 /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
14099 /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
14100 /// client obtained this handle from this `Node`.
14101 ///
14102 /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
14103 /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
14104 /// despite the two calls typically being on different channels.
14105 ///
14106 /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
14107 ///
14108 /// All table fields are currently required.
14109 ///
14110 /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
14111 /// different `Node` channel, to prove that the client obtained the handle
14112 /// from this `Node`.
14113 GetNodeRef { responder: NodeGetNodeRefResponder },
14114 /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
14115 /// rooted at a different child token of a common parent
14116 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
14117 /// passed-in `node_ref`.
14118 ///
14119 /// This call is for assisting with admission control de-duplication, and
14120 /// with debugging.
14121 ///
14122 /// The `node_ref` must be obtained using
14123 /// [`fuchsia.sysmem2/Node.GetNodeRef`].
14124 ///
14125 /// The `node_ref` can be a duplicated handle; it's not necessary to call
14126 /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
14127 ///
14128 /// If a calling token may not actually be a valid token at all due to a
14129 /// potentially hostile/untrusted provider of the token, call
14130 /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
14131 /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
14132 /// never responds due to a calling token not being a real token (not really
14133 /// talking to sysmem). Another option is to call
14134 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
14135 /// which also validates the token along with converting it to a
14136 /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
14137 ///
14138 /// All table fields are currently required.
14139 ///
14140 /// - response `is_alternate`
14141 /// - true: The first parent node in common between the calling node and
14142 /// the `node_ref` `Node` is a `BufferCollectionTokenGroup`. This means
14143 /// that the calling `Node` and the `node_ref` `Node` will not have both
14144 /// their constraints apply - rather sysmem will choose one or the other
14145 /// of the constraints - never both. This is because only one child of
14146 /// a `BufferCollectionTokenGroup` is selected during logical
14147 /// allocation, with only that one child's subtree contributing to
14148 /// constraints aggregation.
14149 /// - false: The first parent node in common between the calling `Node`
14150 /// and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
14151 /// Currently, this means the first parent node in common is a
14152 /// `BufferCollectionToken` or `BufferCollection` (regardless of not
14153 /// `Release`ed). This means that the calling `Node` and the `node_ref`
14154 /// `Node` may have both their constraints apply during constraints
14155 /// aggregation of the logical allocation, if both `Node`(s) are
14156 /// selected by any parent `BufferCollectionTokenGroup`(s) involved. In
14157 /// this case, there is no `BufferCollectionTokenGroup` that will
14158 /// directly prevent the two `Node`(s) from both being selected and
14159 /// their constraints both aggregated, but even when false, one or both
14160 /// `Node`(s) may still be eliminated from consideration if one or both
14161 /// `Node`(s) has a direct or indirect parent
14162 /// `BufferCollectionTokenGroup` which selects a child subtree other
14163 /// than the subtree containing the calling `Node` or `node_ref` `Node`.
14164 /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
14165 /// associated with the same buffer collection as the calling `Node`.
14166 /// Another reason for this error is if the `node_ref` is an
14167 /// [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
14168 /// a real `node_ref` obtained from `GetNodeRef`.
14169 /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    /// `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
14171 /// the needed rights expected on a real `node_ref`.
14172 /// * No other failing status codes are returned by this call. However,
14173 /// sysmem may add additional codes in future, so the client should have
14174 /// sensible default handling for any failing status code.
14175 IsAlternateFor { payload: NodeIsAlternateForRequest, responder: NodeIsAlternateForResponder },
14176 /// Get the buffer collection ID. This ID is also available from
14177 /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
14178 /// within the collection).
14179 ///
14180 /// This call is mainly useful in situations where we can't convey a
14181 /// [`fuchsia.sysmem2/BufferCollectionToken`] or
14182 /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
14183 /// handle, which can be joined back up with a `BufferCollection` client end
14184 /// that was created via a different path. Prefer to convey a
14185 /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
14186 ///
14187 /// Trusting a `buffer_collection_id` value from a source other than sysmem
14188 /// is analogous to trusting a koid value from a source other than zircon.
14189 /// Both should be avoided unless really necessary, and both require
14190 /// caution. In some situations it may be reasonable to refer to a
14191 /// pre-established `BufferCollection` by `buffer_collection_id` via a
14192 /// protocol for efficiency reasons, but an incoming value purporting to be
14193 /// a `buffer_collection_id` is not sufficient alone to justify granting the
14194 /// sender of the `buffer_collection_id` any capability. The sender must
14195 /// first prove to a receiver that the sender has/had a VMO or has/had a
14196 /// `BufferCollectionToken` to the same collection by sending a handle that
14197 /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
14198 /// `buffer_collection_id` value. The receiver should take care to avoid
14199 /// assuming that a sender had a `BufferCollectionToken` in cases where the
14200 /// sender has only proven that the sender had a VMO.
14201 ///
14202 /// - response `buffer_collection_id` This ID is unique per buffer
14203 /// collection per boot. Each buffer is uniquely identified by the
14204 /// `buffer_collection_id` and `buffer_index` together.
14205 GetBufferCollectionId { responder: NodeGetBufferCollectionIdResponder },
14206 /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
14207 /// created after this message to weak, which means that a client's `Node`
14208 /// client end (or a child created after this message) is not alone
14209 /// sufficient to keep allocated VMOs alive.
14210 ///
14211 /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
14212 /// `close_weak_asap`.
14213 ///
14214 /// This message is only permitted before the `Node` becomes ready for
14215 /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
14216 /// * `BufferCollectionToken`: any time
14217 /// * `BufferCollection`: before `SetConstraints`
14218 /// * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
14219 ///
14220 /// Currently, no conversion from strong `Node` to weak `Node` after ready
14221 /// for allocation is provided, but a client can simulate that by creating
14222 /// an additional `Node` before allocation and setting that additional
14223 /// `Node` to weak, and then potentially at some point later sending
14224 /// `Release` and closing the client end of the client's strong `Node`, but
14225 /// keeping the client's weak `Node`.
14226 ///
14227 /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
14228 /// collection failure (all `Node` client end(s) will see
14229 /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
14230 /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
14231 /// this situation until all `Node`(s) are ready for allocation. For initial
14232 /// allocation to succeed, at least one strong `Node` is required to exist
14233 /// at allocation time, but after that client receives VMO handles, that
14234 /// client can `BufferCollection.Release` and close the client end without
14235 /// causing this type of failure.
14236 ///
14237 /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
14238 /// imply `SetWeakOk` with `for_children_also` true, which can be sent
14239 /// separately as appropriate.
14240 SetWeak { control_handle: NodeControlHandle },
14241 /// This indicates to sysmem that the client is prepared to pay attention to
14242 /// `close_weak_asap`.
14243 ///
14244 /// If sent, this message must be before
14245 /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
14246 ///
14247 /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
14248 /// send this message before `WaitForAllBuffersAllocated`, or a parent
14249 /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
14250 /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
14251 /// trigger buffer collection failure.
14252 ///
14253 /// This message is necessary because weak sysmem VMOs have not always been
14254 /// a thing, so older clients are not aware of the need to pay attention to
14255 /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
14256 /// sysmem weak VMO handles asap. By having this message and requiring
14257 /// participants to indicate their acceptance of this aspect of the overall
14258 /// protocol, we avoid situations where an older client is delivered a weak
14259 /// VMO without any way for sysmem to get that VMO to close quickly later
14260 /// (and on a per-buffer basis).
14261 ///
14262 /// A participant that doesn't handle `close_weak_asap` and also doesn't
14263 /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
14264 /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
14265 /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
14266 /// same participant has a child/delegate which does retrieve VMOs, that
14267 /// child/delegate will need to send `SetWeakOk` before
14268 /// `WaitForAllBuffersAllocated`.
14269 ///
14270 /// + request `for_child_nodes_also` If present and true, this means direct
14271 /// child nodes of this node created after this message plus all
14272 /// descendants of those nodes will behave as if `SetWeakOk` was sent on
14273 /// those nodes. Any child node of this node that was created before this
14274 /// message is not included. This setting is "sticky" in the sense that a
14275 /// subsequent `SetWeakOk` without this bool set to true does not reset
14276 /// the server-side bool. If this creates a problem for a participant, a
14277 /// workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
14278 /// tokens instead, as appropriate. A participant should only set
14279 /// `for_child_nodes_also` true if the participant can really promise to
14280 /// obey `close_weak_asap` both for its own weak VMO handles, and for all
14281 /// weak VMO handles held by participants holding the corresponding child
14282 /// `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
14283 /// which are using sysmem(1) can be weak, despite the clients of those
14284 /// sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
14285 /// direct way to find out about `close_weak_asap`. This only applies to
14286 /// descendents of this `Node` which are using sysmem(1), not to this
14287 /// `Node` when converted directly from a sysmem2 token to a sysmem(1)
14288 /// token, which will fail allocation unless an ancestor of this `Node`
14289 /// specified `for_child_nodes_also` true.
14290 SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: NodeControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
14293 /// reservation by a different `Node` via
14294 /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
14295 ///
14296 /// The `Node` buffer counts may not be released until the entire tree of
14297 /// `Node`(s) is closed or failed, because
14298 /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
14299 /// does not immediately un-reserve the `Node` buffer counts. Instead, the
14300 /// `Node` buffer counts remain reserved until the orphaned node is later
14301 /// cleaned up.
14302 ///
14303 /// If the `Node` exceeds a fairly large number of attached eventpair server
14304 /// ends, a log message will indicate this and the `Node` (and the
14305 /// appropriate) sub-tree will fail.
14306 ///
14307 /// The `server_end` will remain open when
14308 /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
14309 /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
14310 /// [`fuchsia.sysmem2/BufferCollection`].
14311 ///
14312 /// This message can also be used with a
14313 /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
14314 AttachNodeTracking { payload: NodeAttachNodeTrackingRequest, control_handle: NodeControlHandle },
14315 /// An interaction was received which does not match any known method.
14316 #[non_exhaustive]
14317 _UnknownMethod {
14318 /// Ordinal of the method that was called.
14319 ordinal: u64,
14320 control_handle: NodeControlHandle,
14321 method_type: fidl::MethodType,
14322 },
14323}
14324
14325impl NodeRequest {
14326 #[allow(irrefutable_let_patterns)]
14327 pub fn into_sync(self) -> Option<(NodeSyncResponder)> {
14328 if let NodeRequest::Sync { responder } = self {
14329 Some((responder))
14330 } else {
14331 None
14332 }
14333 }
14334
14335 #[allow(irrefutable_let_patterns)]
14336 pub fn into_release(self) -> Option<(NodeControlHandle)> {
14337 if let NodeRequest::Release { control_handle } = self {
14338 Some((control_handle))
14339 } else {
14340 None
14341 }
14342 }
14343
14344 #[allow(irrefutable_let_patterns)]
14345 pub fn into_set_name(self) -> Option<(NodeSetNameRequest, NodeControlHandle)> {
14346 if let NodeRequest::SetName { payload, control_handle } = self {
14347 Some((payload, control_handle))
14348 } else {
14349 None
14350 }
14351 }
14352
14353 #[allow(irrefutable_let_patterns)]
14354 pub fn into_set_debug_client_info(
14355 self,
14356 ) -> Option<(NodeSetDebugClientInfoRequest, NodeControlHandle)> {
14357 if let NodeRequest::SetDebugClientInfo { payload, control_handle } = self {
14358 Some((payload, control_handle))
14359 } else {
14360 None
14361 }
14362 }
14363
14364 #[allow(irrefutable_let_patterns)]
14365 pub fn into_set_debug_timeout_log_deadline(
14366 self,
14367 ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, NodeControlHandle)> {
14368 if let NodeRequest::SetDebugTimeoutLogDeadline { payload, control_handle } = self {
14369 Some((payload, control_handle))
14370 } else {
14371 None
14372 }
14373 }
14374
14375 #[allow(irrefutable_let_patterns)]
14376 pub fn into_set_verbose_logging(self) -> Option<(NodeControlHandle)> {
14377 if let NodeRequest::SetVerboseLogging { control_handle } = self {
14378 Some((control_handle))
14379 } else {
14380 None
14381 }
14382 }
14383
14384 #[allow(irrefutable_let_patterns)]
14385 pub fn into_get_node_ref(self) -> Option<(NodeGetNodeRefResponder)> {
14386 if let NodeRequest::GetNodeRef { responder } = self {
14387 Some((responder))
14388 } else {
14389 None
14390 }
14391 }
14392
14393 #[allow(irrefutable_let_patterns)]
14394 pub fn into_is_alternate_for(
14395 self,
14396 ) -> Option<(NodeIsAlternateForRequest, NodeIsAlternateForResponder)> {
14397 if let NodeRequest::IsAlternateFor { payload, responder } = self {
14398 Some((payload, responder))
14399 } else {
14400 None
14401 }
14402 }
14403
14404 #[allow(irrefutable_let_patterns)]
14405 pub fn into_get_buffer_collection_id(self) -> Option<(NodeGetBufferCollectionIdResponder)> {
14406 if let NodeRequest::GetBufferCollectionId { responder } = self {
14407 Some((responder))
14408 } else {
14409 None
14410 }
14411 }
14412
14413 #[allow(irrefutable_let_patterns)]
14414 pub fn into_set_weak(self) -> Option<(NodeControlHandle)> {
14415 if let NodeRequest::SetWeak { control_handle } = self {
14416 Some((control_handle))
14417 } else {
14418 None
14419 }
14420 }
14421
14422 #[allow(irrefutable_let_patterns)]
14423 pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, NodeControlHandle)> {
14424 if let NodeRequest::SetWeakOk { payload, control_handle } = self {
14425 Some((payload, control_handle))
14426 } else {
14427 None
14428 }
14429 }
14430
14431 #[allow(irrefutable_let_patterns)]
14432 pub fn into_attach_node_tracking(
14433 self,
14434 ) -> Option<(NodeAttachNodeTrackingRequest, NodeControlHandle)> {
14435 if let NodeRequest::AttachNodeTracking { payload, control_handle } = self {
14436 Some((payload, control_handle))
14437 } else {
14438 None
14439 }
14440 }
14441
14442 /// Name of the method defined in FIDL
14443 pub fn method_name(&self) -> &'static str {
14444 match *self {
14445 NodeRequest::Sync { .. } => "sync",
14446 NodeRequest::Release { .. } => "release",
14447 NodeRequest::SetName { .. } => "set_name",
14448 NodeRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
14449 NodeRequest::SetDebugTimeoutLogDeadline { .. } => "set_debug_timeout_log_deadline",
14450 NodeRequest::SetVerboseLogging { .. } => "set_verbose_logging",
14451 NodeRequest::GetNodeRef { .. } => "get_node_ref",
14452 NodeRequest::IsAlternateFor { .. } => "is_alternate_for",
14453 NodeRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
14454 NodeRequest::SetWeak { .. } => "set_weak",
14455 NodeRequest::SetWeakOk { .. } => "set_weak_ok",
14456 NodeRequest::AttachNodeTracking { .. } => "attach_node_tracking",
14457 NodeRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
14458 "unknown one-way method"
14459 }
14460 NodeRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
14461 "unknown two-way method"
14462 }
14463 }
14464 }
14465}
14466
/// Control handle for a served `Node` channel.
///
/// Cloning is cheap: every clone shares the same underlying serving state
/// via the `Arc`, so all clones refer to the same channel.
#[derive(Debug, Clone)]
pub struct NodeControlHandle {
    /// Shared serving state for the underlying channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
14471
impl fidl::endpoints::ControlHandle for NodeControlHandle {
    // Every method below delegates directly to the shared `ServeInner` or to
    // the channel it wraps; this type adds no behavior of its own.

    /// Shuts down the server end of the channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    /// Shuts down the channel, sending `status` as an epitaph to the client.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel has been closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }

    /// Returns a future/signal handle that resolves when the channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Only available on Fuchsia: raw zircon signaling on the channel's peer.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
14497
// The `Node` protocol defines no events, so this control handle has no
// inherent methods beyond the `ControlHandle` trait implementation.
impl NodeControlHandle {}
14499
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeSyncResponder {
    /// Wrapped in `ManuallyDrop` so the `Drop` impl and
    /// `drop_without_shutdown` can each release it exactly once.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    /// Transaction id echoed back to the client in the response message.
    tx_id: u32,
}
14506
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeSyncResponder {
    fn drop(&mut self) {
        // Dropping without responding: shut the channel so the peer's pending
        // call fails instead of hanging forever.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14517
impl fidl::endpoints::Responder for NodeSyncResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle for the channel this response belongs to.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop
    /// behavior of the `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14532
14533impl NodeSyncResponder {
14534 /// Sends a response to the FIDL transaction.
14535 ///
14536 /// Sets the channel to shutdown if an error occurs.
14537 pub fn send(self) -> Result<(), fidl::Error> {
14538 let _result = self.send_raw();
14539 if _result.is_err() {
14540 self.control_handle.shutdown();
14541 }
14542 self.drop_without_shutdown();
14543 _result
14544 }
14545
14546 /// Similar to "send" but does not shutdown the channel if an error occurs.
14547 pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
14548 let _result = self.send_raw();
14549 self.drop_without_shutdown();
14550 _result
14551 }
14552
14553 fn send_raw(&self) -> Result<(), fidl::Error> {
14554 self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
14555 fidl::encoding::Flexible::new(()),
14556 self.tx_id,
14557 0x11ac2555cf575b54,
14558 fidl::encoding::DynamicFlags::FLEXIBLE,
14559 )
14560 }
14561}
14562
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetNodeRefResponder {
    /// Wrapped in `ManuallyDrop` so the `Drop` impl and
    /// `drop_without_shutdown` can each release it exactly once.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    /// Transaction id echoed back to the client in the response message.
    tx_id: u32,
}
14569
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetNodeRefResponder {
    fn drop(&mut self) {
        // Dropping without responding: shut the channel so the peer's pending
        // call fails instead of hanging forever.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14580
impl fidl::endpoints::Responder for NodeGetNodeRefResponder {
    type ControlHandle = NodeControlHandle;

    /// Borrows the control handle for the channel this response belongs to.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the shutdown-on-drop
    /// behavior of the `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14595
14596impl NodeGetNodeRefResponder {
14597 /// Sends a response to the FIDL transaction.
14598 ///
14599 /// Sets the channel to shutdown if an error occurs.
14600 pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14601 let _result = self.send_raw(payload);
14602 if _result.is_err() {
14603 self.control_handle.shutdown();
14604 }
14605 self.drop_without_shutdown();
14606 _result
14607 }
14608
14609 /// Similar to "send" but does not shutdown the channel if an error occurs.
14610 pub fn send_no_shutdown_on_err(
14611 self,
14612 mut payload: NodeGetNodeRefResponse,
14613 ) -> Result<(), fidl::Error> {
14614 let _result = self.send_raw(payload);
14615 self.drop_without_shutdown();
14616 _result
14617 }
14618
14619 fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14620 self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
14621 fidl::encoding::Flexible::new(&mut payload),
14622 self.tx_id,
14623 0x5b3d0e51614df053,
14624 fidl::encoding::DynamicFlags::FLEXIBLE,
14625 )
14626 }
14627}
14628
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeIsAlternateForResponder {
    /// Wrapped in `ManuallyDrop` so the `Drop` impl and
    /// `drop_without_shutdown` can each release it exactly once.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    /// Transaction id echoed back to the client in the response message.
    tx_id: u32,
}
14635
/// Set the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeIsAlternateForResponder {
    fn drop(&mut self) {
        // Dropping without responding: shut the channel so the peer's pending
        // call fails instead of hanging forever.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14646
impl fidl::endpoints::Responder for NodeIsAlternateForResponder {
    type ControlHandle = NodeControlHandle;

    /// Returns the control handle of the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a reply and without shutting
    /// down the channel (the `Drop` impl would call `shutdown`).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14661
14662impl NodeIsAlternateForResponder {
14663 /// Sends a response to the FIDL transaction.
14664 ///
14665 /// Sets the channel to shutdown if an error occurs.
14666 pub fn send(
14667 self,
14668 mut result: Result<&NodeIsAlternateForResponse, Error>,
14669 ) -> Result<(), fidl::Error> {
14670 let _result = self.send_raw(result);
14671 if _result.is_err() {
14672 self.control_handle.shutdown();
14673 }
14674 self.drop_without_shutdown();
14675 _result
14676 }
14677
14678 /// Similar to "send" but does not shutdown the channel if an error occurs.
14679 pub fn send_no_shutdown_on_err(
14680 self,
14681 mut result: Result<&NodeIsAlternateForResponse, Error>,
14682 ) -> Result<(), fidl::Error> {
14683 let _result = self.send_raw(result);
14684 self.drop_without_shutdown();
14685 _result
14686 }
14687
14688 fn send_raw(
14689 &self,
14690 mut result: Result<&NodeIsAlternateForResponse, Error>,
14691 ) -> Result<(), fidl::Error> {
14692 self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
14693 NodeIsAlternateForResponse,
14694 Error,
14695 >>(
14696 fidl::encoding::FlexibleResult::new(result),
14697 self.tx_id,
14698 0x3a58e00157e0825,
14699 fidl::encoding::DynamicFlags::FLEXIBLE,
14700 )
14701 }
14702}
14703
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so the custom Drop / drop_without_shutdown
    // paths control exactly when the control handle itself is dropped.
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back when sending the reply.
    tx_id: u32,
}
14710
/// Set the channel to be shut down (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14721
impl fidl::endpoints::Responder for NodeGetBufferCollectionIdResponder {
    type ControlHandle = NodeControlHandle;

    /// Returns the control handle of the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a reply and without shutting
    /// down the channel (the `Drop` impl would call `shutdown`).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14736
14737impl NodeGetBufferCollectionIdResponder {
14738 /// Sends a response to the FIDL transaction.
14739 ///
14740 /// Sets the channel to shutdown if an error occurs.
14741 pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14742 let _result = self.send_raw(payload);
14743 if _result.is_err() {
14744 self.control_handle.shutdown();
14745 }
14746 self.drop_without_shutdown();
14747 _result
14748 }
14749
14750 /// Similar to "send" but does not shutdown the channel if an error occurs.
14751 pub fn send_no_shutdown_on_err(
14752 self,
14753 mut payload: &NodeGetBufferCollectionIdResponse,
14754 ) -> Result<(), fidl::Error> {
14755 let _result = self.send_raw(payload);
14756 self.drop_without_shutdown();
14757 _result
14758 }
14759
14760 fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14761 self.control_handle
14762 .inner
14763 .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
14764 fidl::encoding::Flexible::new(payload),
14765 self.tx_id,
14766 0x77d19a494b78ba8c,
14767 fidl::encoding::DynamicFlags::FLEXIBLE,
14768 )
14769 }
14770}
14771
/// Zero-sized marker type identifying the `SecureMem` protocol; ties together
/// the proxy, request-stream, and synchronous-proxy types below.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SecureMemMarker;
14774
impl fidl::endpoints::ProtocolMarker for SecureMemMarker {
    type Proxy = SecureMemProxy;
    type RequestStream = SecureMemRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = SecureMemSynchronousProxy;

    // Name used in error messages and debug output.
    // NOTE(review): "(anonymous)" suggests the protocol has no discoverable
    // name — confirm against the FIDL definition.
    const DEBUG_NAME: &'static str = "(anonymous) SecureMem";
}
/// Result of `SecureMem.GetPhysicalSecureHeaps`.
pub type SecureMemGetPhysicalSecureHeapsResult =
    Result<SecureMemGetPhysicalSecureHeapsResponse, Error>;
/// Result of `SecureMem.GetDynamicSecureHeaps`.
pub type SecureMemGetDynamicSecureHeapsResult =
    Result<SecureMemGetDynamicSecureHeapsResponse, Error>;
/// Result of `SecureMem.GetPhysicalSecureHeapProperties`.
pub type SecureMemGetPhysicalSecureHeapPropertiesResult =
    Result<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>;
/// Result of `SecureMem.AddSecureHeapPhysicalRange` (no payload on success).
pub type SecureMemAddSecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.DeleteSecureHeapPhysicalRange` (no payload on success).
pub type SecureMemDeleteSecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.ModifySecureHeapPhysicalRange` (no payload on success).
pub type SecureMemModifySecureHeapPhysicalRangeResult = Result<(), Error>;
/// Result of `SecureMem.ZeroSubRange` (no payload on success).
pub type SecureMemZeroSubRangeResult = Result<(), Error>;
14793
/// Client-side interface for the `SecureMem` protocol: one future type and
/// one method per two-way FIDL call, implemented by the generated proxy types.
pub trait SecureMemProxyInterface: Send + Sync {
    type GetPhysicalSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut;
    type GetDynamicSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut;
    type GetPhysicalSecureHeapPropertiesResponseFut: std::future::Future<
            Output = Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error>,
        > + Send;
    fn r#get_physical_secure_heap_properties(
        &self,
        payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut;
    type AddSecureHeapPhysicalRangeResponseFut: std::future::Future<Output = Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error>>
        + Send;
    fn r#add_secure_heap_physical_range(
        &self,
        payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut;
    type DeleteSecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#delete_secure_heap_physical_range(
        &self,
        payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut;
    type ModifySecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#modify_secure_heap_physical_range(
        &self,
        payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut;
    type ZeroSubRangeResponseFut: std::future::Future<Output = Result<SecureMemZeroSubRangeResult, fidl::Error>>
        + Send;
    fn r#zero_sub_range(
        &self,
        payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut;
}
/// Synchronous (blocking) client for `SecureMem`; Fuchsia-only.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct SecureMemSynchronousProxy {
    // Untyped synchronous FIDL client that owns the channel.
    client: fidl::client::sync::Client,
}
14840
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for SecureMemSynchronousProxy {
    type Proxy = SecureMemProxy;
    type Protocol = SecureMemMarker;

    /// Wraps a raw channel in a synchronous proxy.
    fn from_channel(channel: fidl::Channel) -> Self {
        SecureMemSynchronousProxy::new(channel)
    }

    /// Unwraps the proxy, yielding the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
14858
#[cfg(target_os = "fuchsia")]
impl SecureMemSynchronousProxy {
    /// Creates a synchronous proxy over `channel`; the protocol's debug name
    /// is attached to the client for error reporting.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemEvent, fidl::Error> {
        SecureMemEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_physical_secure_heaps(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
        // The method ordinal and FLEXIBLE dynamic flags below (and in each
        // method of this impl) are generated by fidlgen; do not hand-edit.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
        Ok(_response.map(|x| x))
    }

    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    /// * PROTOCOL_DEVIATION - called more than once.
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_dynamic_secure_heaps(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable. Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    /// * UNSPECIFIED - generic internal error (such as in communication
    ///   with TEE which doesn't generate zx_status_t errors).
    /// * other errors are allowed; any other errors should be treated the same
    ///   as UNSPECIFIED.
    pub fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range. Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges. Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted. Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
        Ok(_response.map(|x| x))
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
        Ok(_response.map(|x| x))
    }

    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since. When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange(). The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
        let _response = self.client.send_query::<
            SecureMemZeroSubRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<SecureMemMarker>("zero_sub_range")?;
        Ok(_response.map(|x| x))
    }
}
15189
#[cfg(target_os = "fuchsia")]
impl From<SecureMemSynchronousProxy> for zx::Handle {
    /// Converts the proxy into its underlying kernel handle.
    fn from(proxy: SecureMemSynchronousProxy) -> Self {
        let channel = proxy.into_channel();
        channel.into()
    }
}
15196
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for SecureMemSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(channel: fidl::Channel) -> Self {
        SecureMemSynchronousProxy::new(channel)
    }
}
15203
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for SecureMemSynchronousProxy {
    type Protocol = SecureMemMarker;

    /// Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<SecureMemMarker>) -> Self {
        let channel = value.into_channel();
        Self::new(channel)
    }
}
15212
/// Asynchronous client for `SecureMem`.
#[derive(Debug, Clone)]
pub struct SecureMemProxy {
    // Untyped async FIDL client that owns the channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15217
15218impl fidl::endpoints::Proxy for SecureMemProxy {
15219 type Protocol = SecureMemMarker;
15220
15221 fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
15222 Self::new(inner)
15223 }
15224
15225 fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
15226 self.client.into_channel().map_err(|client| Self { client })
15227 }
15228
15229 fn as_channel(&self) -> &::fidl::AsyncChannel {
15230 self.client.as_channel()
15231 }
15232}
15233
15234impl SecureMemProxy {
15235 /// Create a new Proxy for fuchsia.sysmem2/SecureMem.
15236 pub fn new(channel: ::fidl::AsyncChannel) -> Self {
15237 let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
15238 Self { client: fidl::client::Client::new(channel, protocol_name) }
15239 }
15240
15241 /// Get a Stream of events from the remote end of the protocol.
15242 ///
15243 /// # Panics
15244 ///
15245 /// Panics if the event stream was already taken.
15246 pub fn take_event_stream(&self) -> SecureMemEventStream {
15247 SecureMemEventStream { event_receiver: self.client.take_event_receiver() }
15248 }
15249
15250 /// Gets the physical address and length of any secure heap whose physical
15251 /// range is configured via the TEE.
15252 ///
15253 /// Presently, these will be fixed physical addresses and lengths, with the
15254 /// location plumbed via the TEE.
15255 ///
15256 /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
15257 /// when there isn't any special heap-specific per-VMO setup or teardown
15258 /// required.
15259 ///
15260 /// The physical range must be secured/protected by the TEE before the
15261 /// securemem driver responds to this request with success.
15262 ///
15263 /// Sysmem should only call this once. Returning zero heaps is not a
15264 /// failure.
15265 ///
15266 /// Errors:
15267 /// * PROTOCOL_DEVIATION - called more than once.
15268 /// * UNSPECIFIED - generic internal error (such as in communication
15269 /// with TEE which doesn't generate zx_status_t errors).
15270 /// * other errors are allowed; any other errors should be treated the same
15271 /// as UNSPECIFIED.
15272 pub fn r#get_physical_secure_heaps(
15273 &self,
15274 ) -> fidl::client::QueryResponseFut<
15275 SecureMemGetPhysicalSecureHeapsResult,
15276 fidl::encoding::DefaultFuchsiaResourceDialect,
15277 > {
15278 SecureMemProxyInterface::r#get_physical_secure_heaps(self)
15279 }
15280
15281 /// Gets information about any secure heaps whose physical pages are not
15282 /// configured by the TEE, but by sysmem.
15283 ///
15284 /// Sysmem should only call this once. Returning zero heaps is not a
15285 /// failure.
15286 ///
15287 /// Errors:
15288 /// * PROTOCOL_DEVIATION - called more than once.
15289 /// * UNSPECIFIED - generic internal error (such as in communication
15290 /// with TEE which doesn't generate zx_status_t errors).
15291 /// * other errors are allowed; any other errors should be treated the same
15292 /// as UNSPECIFIED.
15293 pub fn r#get_dynamic_secure_heaps(
15294 &self,
15295 ) -> fidl::client::QueryResponseFut<
15296 SecureMemGetDynamicSecureHeapsResult,
15297 fidl::encoding::DefaultFuchsiaResourceDialect,
15298 > {
15299 SecureMemProxyInterface::r#get_dynamic_secure_heaps(self)
15300 }
15301
15302 /// This request from sysmem to the securemem driver gets the properties of
15303 /// a protected/secure heap.
15304 ///
15305 /// This only handles heaps with a single contiguous physical extent.
15306 ///
15307 /// The heap's entire physical range is indicated in case this request needs
15308 /// some physical space to auto-detect how many ranges are REE-usable. Any
15309 /// temporary HW protection ranges will be deleted before this request
15310 /// completes.
15311 ///
15312 /// Errors:
15313 /// * UNSPECIFIED - generic internal error (such as in communication
15314 /// with TEE which doesn't generate zx_status_t errors).
15315 /// * other errors are allowed; any other errors should be treated the same
15316 /// as UNSPECIFIED.
15317 pub fn r#get_physical_secure_heap_properties(
15318 &self,
15319 mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
15320 ) -> fidl::client::QueryResponseFut<
15321 SecureMemGetPhysicalSecureHeapPropertiesResult,
15322 fidl::encoding::DefaultFuchsiaResourceDialect,
15323 > {
15324 SecureMemProxyInterface::r#get_physical_secure_heap_properties(self, payload)
15325 }
15326
15327 /// This request from sysmem to the securemem driver conveys a physical
15328 /// range to add, for a heap whose physical range(s) are set up via
15329 /// sysmem.
15330 ///
15331 /// Only sysmem can call this because only sysmem is handed the client end
15332 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15333 /// securemem driver is the server end of this protocol.
15334 ///
15335 /// The securemem driver must configure all the covered offsets as protected
15336 /// before responding to this message with success.
15337 ///
15338 /// On failure, the securemem driver must ensure the protected range was not
15339 /// created.
15340 ///
15341 /// Sysmem must only call this up to once if dynamic_protection_ranges
15342 /// false.
15343 ///
15344 /// If dynamic_protection_ranges is true, sysmem can call this multiple
15345 /// times as long as the current number of ranges never exceeds
15346 /// max_protected_range_count.
15347 ///
15348 /// The caller must not attempt to add a range that matches an
15349 /// already-existing range. Added ranges can overlap each other as long as
15350 /// no two ranges match exactly.
15351 ///
15352 /// Errors:
15353 /// * PROTOCOL_DEVIATION - called more than once when
15354 /// !dynamic_protection_ranges. Adding a heap that would cause overall
15355 /// heap count to exceed max_protected_range_count. Unexpected heap, or
15356 /// range that doesn't conform to protected_range_granularity. See log.
15357 /// * UNSPECIFIED - generic internal error (such as in communication
15358 /// with TEE which doesn't generate zx_status_t errors).
15359 /// * other errors are possible, such as from communication failures or
15360 /// server propagation of failures.
15361 pub fn r#add_secure_heap_physical_range(
15362 &self,
15363 mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
15364 ) -> fidl::client::QueryResponseFut<
15365 SecureMemAddSecureHeapPhysicalRangeResult,
15366 fidl::encoding::DefaultFuchsiaResourceDialect,
15367 > {
15368 SecureMemProxyInterface::r#add_secure_heap_physical_range(self, payload)
15369 }
15370
15371 /// This request from sysmem to the securemem driver conveys a physical
15372 /// range to delete, for a heap whose physical range(s) are set up via
15373 /// sysmem.
15374 ///
15375 /// Only sysmem can call this because only sysmem is handed the client end
15376 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15377 /// securemem driver is the server end of this protocol.
15378 ///
15379 /// The securemem driver must configure all the covered offsets as not
15380 /// protected before responding to this message with success.
15381 ///
15382 /// On failure, the securemem driver must ensure the protected range was not
15383 /// deleted.
15384 ///
15385 /// Sysmem must not call this if dynamic_protection_ranges false.
15386 ///
15387 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15388 /// on various ranges that exist at the time of the call.
15389 ///
15390 /// If any portion of the range being deleted is not also covered by another
15391 /// protected range, then any ongoing DMA to any part of the entire range
15392 /// may be interrupted / may fail, potentially in a way that's disruptive to
15393 /// the entire system (bus lockup or similar, depending on device details).
15394 /// Therefore, the caller must ensure that no ongoing DMA is occurring to
15395 /// any portion of the range being deleted, unless the caller has other
15396 /// active ranges covering every block of the range being deleted. Ongoing
15397 /// DMA to/from blocks outside the range being deleted is never impacted by
15398 /// the deletion.
15399 ///
15400 /// Errors:
15401 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15402 /// Unexpected heap, or range that doesn't conform to
15403 /// protected_range_granularity.
15404 /// * UNSPECIFIED - generic internal error (such as in communication
15405 /// with TEE which doesn't generate zx_status_t errors).
15406 /// * NOT_FOUND - the specified range is not found.
15407 /// * other errors are possible, such as from communication failures or
15408 /// server propagation of failures.
15409 pub fn r#delete_secure_heap_physical_range(
15410 &self,
15411 mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
15412 ) -> fidl::client::QueryResponseFut<
15413 SecureMemDeleteSecureHeapPhysicalRangeResult,
15414 fidl::encoding::DefaultFuchsiaResourceDialect,
15415 > {
15416 SecureMemProxyInterface::r#delete_secure_heap_physical_range(self, payload)
15417 }
15418
15419 /// This request from sysmem to the securemem driver conveys a physical
15420 /// range to modify and its new base and length, for a heap whose physical
15421 /// range(s) are set up via sysmem.
15422 ///
15423 /// Only sysmem can call this because only sysmem is handed the client end
15424 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
15425 /// securemem driver is the server end of this protocol.
15426 ///
15427 /// The securemem driver must configure the range to cover only the new
15428 /// offsets before responding to this message with success.
15429 ///
15430 /// On failure, the securemem driver must ensure the range was not changed.
15431 ///
15432 /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
15433 /// must not call this if !is_mod_protected_range_available.
15434 ///
15435 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
15436 /// on various ranges that exist at the time of the call.
15437 ///
15438 /// The range must only be modified at one end or the other, but not both.
15439 /// If the range is getting shorter, and the un-covered blocks are not
15440 /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
15442 /// (bus lockup or similar), so the caller must ensure that no DMA is
15443 /// ongoing to any portion of a range that is getting shorter, unless the
15444 /// blocks being un-covered by the modification to this range are all
15445 /// covered by other active ranges, in which case no disruption to ongoing
15446 /// DMA will occur.
15447 ///
15448 /// If a range is modified to become <= zero length, the range is deleted.
15449 ///
15450 /// Errors:
15451 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15452 /// Unexpected heap, or old_range or new_range that doesn't conform to
15453 /// protected_range_granularity, or old_range and new_range differ in
15454 /// both begin and end (disallowed).
15455 /// * UNSPECIFIED - generic internal error (such as in communication
15456 /// with TEE which doesn't generate zx_status_t errors).
15457 /// * NOT_FOUND - the specified range is not found.
15458 /// * other errors are possible, such as from communication failures or
15459 /// server propagation of failures.
15460 pub fn r#modify_secure_heap_physical_range(
15461 &self,
15462 mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
15463 ) -> fidl::client::QueryResponseFut<
15464 SecureMemModifySecureHeapPhysicalRangeResult,
15465 fidl::encoding::DefaultFuchsiaResourceDialect,
15466 > {
15467 SecureMemProxyInterface::r#modify_secure_heap_physical_range(self, payload)
15468 }
15469
15470 /// Zero a sub-range of a currently-existing physical range added via
15471 /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
15472 /// exactly one physical range, and must not overlap with any other
15473 /// physical range.
15474 ///
15475 /// is_covering_range_explicit - When true, the covering range must be one
15476 /// of the ranges explicitly created via AddSecureHeapPhysicalRange(),
15477 /// possibly modified since. When false, the covering range must not
15478 /// be one of the ranges explicitly created via
15479 /// AddSecureHeapPhysicalRange(), but the covering range must exist as
15480 /// a covering range not created via AddSecureHeapPhysicalRange(). The
15481 /// covering range is typically the entire physical range (or a range
15482 /// which covers even more) of a heap configured by the TEE and whose
15483 /// configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
15484 ///
15485 /// Ongoing DMA is not disrupted by this request.
15486 ///
15487 /// Errors:
15488 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
15489 /// Unexpected heap.
15490 /// * UNSPECIFIED - generic internal error (such as in communication
15491 /// with TEE which doesn't generate zx_status_t errors).
15492 /// * other errors are possible, such as from communication failures or
15493 /// server propagation of failures.
15494 pub fn r#zero_sub_range(
15495 &self,
15496 mut payload: &SecureMemZeroSubRangeRequest,
15497 ) -> fidl::client::QueryResponseFut<
15498 SecureMemZeroSubRangeResult,
15499 fidl::encoding::DefaultFuchsiaResourceDialect,
15500 > {
15501 SecureMemProxyInterface::r#zero_sub_range(self, payload)
15502 }
15503}
15504
// Machine-generated implementation of `SecureMemProxyInterface` for the async
// proxy. Every two-way method follows the same shape: a local `_decode`
// function decodes the reply body (checked against the method's fixed
// ordinal) and unwraps the flexible result envelope via
// `into_result::<SecureMemMarker>`, while `send_query_and_decode` encodes the
// request, sends it with `DynamicFlags::FLEXIBLE`, and returns a future that
// applies `_decode` to the reply.
impl SecureMemProxyInterface for SecureMemProxy {
    type GetPhysicalSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way call, empty request payload, ordinal 0x38716300592073e3.
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x38716300592073e3,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
            // Identity map: success payload is already the result type.
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetPhysicalSecureHeapsResult,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetDynamicSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way call, empty request payload, ordinal 0x1190847f99952834.
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1190847f99952834,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetDynamicSecureHeapsResult,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetPhysicalSecureHeapPropertiesResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way call with a table request payload, ordinal 0xc6f06889009c7bc.
    fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    SecureMemGetPhysicalSecureHeapPropertiesResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xc6f06889009c7bc,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            SecureMemGetPhysicalSecureHeapPropertiesResult,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type AddSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way call, ordinal 0x35f695b9b6c7217a; success reply is an empty
    // struct, so the result carries only the error side.
    fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35f695b9b6c7217a,
            >(_buf?)?
            .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            SecureMemAddSecureHeapPhysicalRangeResult,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type DeleteSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way call, ordinal 0xeaa58c650264c9e; empty-struct success reply.
    fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xeaa58c650264c9e,
            >(_buf?)?
            .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            SecureMemDeleteSecureHeapPhysicalRangeResult,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ModifySecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way call, ordinal 0x60b7448aa1187734; empty-struct success reply.
    fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x60b7448aa1187734,
            >(_buf?)?
            .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            SecureMemModifySecureHeapPhysicalRangeResult,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ZeroSubRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way call, ordinal 0x5b25b7901a385ce5; empty-struct success reply.
    fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b25b7901a385ce5,
            >(_buf?)?
            .into_result::<SecureMemMarker>("zero_sub_range")?;
            Ok(_response.map(|x| x))
        }
        self.client
            .send_query_and_decode::<SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResult>(
                payload,
                0x5b25b7901a385ce5,
                fidl::encoding::DynamicFlags::FLEXIBLE,
                _decode,
            )
    }
}
15711
/// A stream of events arriving for the fuchsia.sysmem2/SecureMem protocol.
/// Each item is a decoded [`SecureMemEvent`] or a decode/transport error.
pub struct SecureMemEventStream {
    // Yields raw message buffers for unsolicited messages (tx_id == 0),
    // which `poll_next` decodes via `SecureMemEvent::decode`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15715
// The stream can be moved after pinning: `poll_next` only accesses the
// receiver through `poll_next_unpin` and never relies on a stable address.
impl std::marker::Unpin for SecureMemEventStream {}
15717
impl futures::stream::FusedStream for SecureMemEventStream {
    /// Delegates to the underlying event receiver; once true, the stream
    /// will yield no further items.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
15723
15724impl futures::Stream for SecureMemEventStream {
15725 type Item = Result<SecureMemEvent, fidl::Error>;
15726
15727 fn poll_next(
15728 mut self: std::pin::Pin<&mut Self>,
15729 cx: &mut std::task::Context<'_>,
15730 ) -> std::task::Poll<Option<Self::Item>> {
15731 match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
15732 &mut self.event_receiver,
15733 cx
15734 )?) {
15735 Some(buf) => std::task::Poll::Ready(Some(SecureMemEvent::decode(buf))),
15736 None => std::task::Poll::Ready(None),
15737 }
15738 }
15739}
15740
/// An event received on the fuchsia.sysmem2/SecureMem protocol.
///
/// This protocol declares no known events, so the only variant is the
/// catch-all produced for flexible events with unrecognized ordinals.
#[derive(Debug)]
pub enum SecureMemEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
15749
15750impl SecureMemEvent {
15751 /// Decodes a message buffer as a [`SecureMemEvent`].
15752 fn decode(
15753 mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
15754 ) -> Result<SecureMemEvent, fidl::Error> {
15755 let (bytes, _handles) = buf.split_mut();
15756 let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15757 debug_assert_eq!(tx_header.tx_id, 0);
15758 match tx_header.ordinal {
15759 _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
15760 Ok(SecureMemEvent::_UnknownEvent { ordinal: tx_header.ordinal })
15761 }
15762 _ => Err(fidl::Error::UnknownOrdinal {
15763 ordinal: tx_header.ordinal,
15764 protocol_name: <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15765 }),
15766 }
15767 }
15768}
15769
/// A Stream of incoming requests for fuchsia.sysmem2/SecureMem.
pub struct SecureMemRequestStream {
    // Shared server-side channel state; also referenced by the control
    // handles and responders handed out with each request.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the peer closes or shutdown is observed; polling the stream
    // after that panics (see `poll_next`).
    is_terminated: bool,
}
15775
// The stream can be moved after pinning: `poll_next` accesses fields only
// through `&mut *self` and never relies on a stable address.
impl std::marker::Unpin for SecureMemRequestStream {}
15777
impl futures::stream::FusedStream for SecureMemRequestStream {
    /// True once the stream has yielded `None` (peer closed or shutdown
    /// requested); `poll_next` panics if polled after that point.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
15783
15784impl fidl::endpoints::RequestStream for SecureMemRequestStream {
15785 type Protocol = SecureMemMarker;
15786 type ControlHandle = SecureMemControlHandle;
15787
15788 fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
15789 Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
15790 }
15791
15792 fn control_handle(&self) -> Self::ControlHandle {
15793 SecureMemControlHandle { inner: self.inner.clone() }
15794 }
15795
15796 fn into_inner(
15797 self,
15798 ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
15799 {
15800 (self.inner, self.is_terminated)
15801 }
15802
15803 fn from_inner(
15804 inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
15805 is_terminated: bool,
15806 ) -> Self {
15807 Self { inner, is_terminated }
15808 }
15809}
15810
// Server-side message pump: reads one message from the channel per poll and
// dispatches it by method ordinal into a typed `SecureMemRequest`, pairing
// each two-way request with a responder that carries the transaction id.
impl futures::Stream for SecureMemRequestStream {
    type Item = Result<SecureMemRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A requested shutdown ends the stream cleanly with `None`.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling again after the stream has finished is a caller bug.
        if this.is_terminated {
            panic!("polled SecureMemRequestStream after completion");
        }
        // Decode using thread-local reusable byte/handle buffers.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream without an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // GetPhysicalSecureHeaps: two-way, empty request payload.
                    0x38716300592073e3 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeaps {
                            responder: SecureMemGetPhysicalSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetDynamicSecureHeaps: two-way, empty request payload.
                    0x1190847f99952834 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetDynamicSecureHeaps {
                            responder: SecureMemGetDynamicSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetPhysicalSecureHeapProperties: two-way with payload.
                    0xc6f06889009c7bc => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemGetPhysicalSecureHeapPropertiesRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemGetPhysicalSecureHeapPropertiesRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeapProperties {
                            payload: req,
                            responder: SecureMemGetPhysicalSecureHeapPropertiesResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AddSecureHeapPhysicalRange: two-way with payload.
                    0x35f695b9b6c7217a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemAddSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemAddSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::AddSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemAddSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // DeleteSecureHeapPhysicalRange: two-way with payload.
                    0xeaa58c650264c9e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemDeleteSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemDeleteSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::DeleteSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemDeleteSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ModifySecureHeapPhysicalRange: two-way with payload.
                    0x60b7448aa1187734 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemModifySecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemModifySecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ModifySecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemModifySecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ZeroSubRange: two-way with payload.
                    0x5b25b7901a385ce5 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemZeroSubRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemZeroSubRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ZeroSubRange {
                            payload: req,
                            responder: SecureMemZeroSubRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method: surface to the server
                    // as `_UnknownMethod`; no reply is needed (tx_id == 0).
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with a framework
                    // "unknown method" error, then surface `_UnknownMethod`.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Strict unknown ordinal: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
15994
15995/// SecureMem
15996///
15997/// The client is sysmem. The server is securemem driver.
15998///
15999/// TEE - Trusted Execution Environment.
16000///
16001/// REE - Rich Execution Environment.
16002///
16003/// Enables sysmem to call the securemem driver to get any secure heaps
16004/// configured via the TEE (or via the securemem driver), and set any physical
16005/// secure heaps configured via sysmem.
16006///
16007/// Presently, dynamically-allocated secure heaps are configured via sysmem, as
16008/// it starts quite early during boot and can successfully reserve contiguous
16009/// physical memory. Presently, fixed-location secure heaps are configured via
16010/// TEE, as the plumbing goes from the bootloader to the TEE. However, this
16011/// protocol intentionally doesn't care which heaps are dynamically-allocated
16012/// and which are fixed-location.
16013#[derive(Debug)]
16014pub enum SecureMemRequest {
16015 /// Gets the physical address and length of any secure heap whose physical
16016 /// range is configured via the TEE.
16017 ///
16018 /// Presently, these will be fixed physical addresses and lengths, with the
16019 /// location plumbed via the TEE.
16020 ///
16021 /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
16022 /// when there isn't any special heap-specific per-VMO setup or teardown
16023 /// required.
16024 ///
16025 /// The physical range must be secured/protected by the TEE before the
16026 /// securemem driver responds to this request with success.
16027 ///
16028 /// Sysmem should only call this once. Returning zero heaps is not a
16029 /// failure.
16030 ///
16031 /// Errors:
16032 /// * PROTOCOL_DEVIATION - called more than once.
16033 /// * UNSPECIFIED - generic internal error (such as in communication
16034 /// with TEE which doesn't generate zx_status_t errors).
16035 /// * other errors are allowed; any other errors should be treated the same
16036 /// as UNSPECIFIED.
16037 GetPhysicalSecureHeaps { responder: SecureMemGetPhysicalSecureHeapsResponder },
16038 /// Gets information about any secure heaps whose physical pages are not
16039 /// configured by the TEE, but by sysmem.
16040 ///
16041 /// Sysmem should only call this once. Returning zero heaps is not a
16042 /// failure.
16043 ///
16044 /// Errors:
16045 /// * PROTOCOL_DEVIATION - called more than once.
16046 /// * UNSPECIFIED - generic internal error (such as in communication
16047 /// with TEE which doesn't generate zx_status_t errors).
16048 /// * other errors are allowed; any other errors should be treated the same
16049 /// as UNSPECIFIED.
16050 GetDynamicSecureHeaps { responder: SecureMemGetDynamicSecureHeapsResponder },
16051 /// This request from sysmem to the securemem driver gets the properties of
16052 /// a protected/secure heap.
16053 ///
16054 /// This only handles heaps with a single contiguous physical extent.
16055 ///
16056 /// The heap's entire physical range is indicated in case this request needs
16057 /// some physical space to auto-detect how many ranges are REE-usable. Any
16058 /// temporary HW protection ranges will be deleted before this request
16059 /// completes.
16060 ///
16061 /// Errors:
16062 /// * UNSPECIFIED - generic internal error (such as in communication
16063 /// with TEE which doesn't generate zx_status_t errors).
16064 /// * other errors are allowed; any other errors should be treated the same
16065 /// as UNSPECIFIED.
16066 GetPhysicalSecureHeapProperties {
16067 payload: SecureMemGetPhysicalSecureHeapPropertiesRequest,
16068 responder: SecureMemGetPhysicalSecureHeapPropertiesResponder,
16069 },
16070 /// This request from sysmem to the securemem driver conveys a physical
16071 /// range to add, for a heap whose physical range(s) are set up via
16072 /// sysmem.
16073 ///
16074 /// Only sysmem can call this because only sysmem is handed the client end
16075 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16076 /// securemem driver is the server end of this protocol.
16077 ///
16078 /// The securemem driver must configure all the covered offsets as protected
16079 /// before responding to this message with success.
16080 ///
16081 /// On failure, the securemem driver must ensure the protected range was not
16082 /// created.
16083 ///
16084 /// Sysmem must only call this up to once if dynamic_protection_ranges
16085 /// false.
16086 ///
16087 /// If dynamic_protection_ranges is true, sysmem can call this multiple
16088 /// times as long as the current number of ranges never exceeds
16089 /// max_protected_range_count.
16090 ///
16091 /// The caller must not attempt to add a range that matches an
16092 /// already-existing range. Added ranges can overlap each other as long as
16093 /// no two ranges match exactly.
16094 ///
16095 /// Errors:
16096 /// * PROTOCOL_DEVIATION - called more than once when
16097 /// !dynamic_protection_ranges. Adding a heap that would cause overall
16098 /// heap count to exceed max_protected_range_count. Unexpected heap, or
16099 /// range that doesn't conform to protected_range_granularity. See log.
16100 /// * UNSPECIFIED - generic internal error (such as in communication
16101 /// with TEE which doesn't generate zx_status_t errors).
16102 /// * other errors are possible, such as from communication failures or
16103 /// server propagation of failures.
16104 AddSecureHeapPhysicalRange {
16105 payload: SecureMemAddSecureHeapPhysicalRangeRequest,
16106 responder: SecureMemAddSecureHeapPhysicalRangeResponder,
16107 },
16108 /// This request from sysmem to the securemem driver conveys a physical
16109 /// range to delete, for a heap whose physical range(s) are set up via
16110 /// sysmem.
16111 ///
16112 /// Only sysmem can call this because only sysmem is handed the client end
16113 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16114 /// securemem driver is the server end of this protocol.
16115 ///
16116 /// The securemem driver must configure all the covered offsets as not
16117 /// protected before responding to this message with success.
16118 ///
16119 /// On failure, the securemem driver must ensure the protected range was not
16120 /// deleted.
16121 ///
16122 /// Sysmem must not call this if dynamic_protection_ranges false.
16123 ///
16124 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
16125 /// on various ranges that exist at the time of the call.
16126 ///
16127 /// If any portion of the range being deleted is not also covered by another
16128 /// protected range, then any ongoing DMA to any part of the entire range
16129 /// may be interrupted / may fail, potentially in a way that's disruptive to
16130 /// the entire system (bus lockup or similar, depending on device details).
16131 /// Therefore, the caller must ensure that no ongoing DMA is occurring to
16132 /// any portion of the range being deleted, unless the caller has other
16133 /// active ranges covering every block of the range being deleted. Ongoing
16134 /// DMA to/from blocks outside the range being deleted is never impacted by
16135 /// the deletion.
16136 ///
16137 /// Errors:
16138 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16139 /// Unexpected heap, or range that doesn't conform to
16140 /// protected_range_granularity.
16141 /// * UNSPECIFIED - generic internal error (such as in communication
16142 /// with TEE which doesn't generate zx_status_t errors).
16143 /// * NOT_FOUND - the specified range is not found.
16144 /// * other errors are possible, such as from communication failures or
16145 /// server propagation of failures.
16146 DeleteSecureHeapPhysicalRange {
16147 payload: SecureMemDeleteSecureHeapPhysicalRangeRequest,
16148 responder: SecureMemDeleteSecureHeapPhysicalRangeResponder,
16149 },
16150 /// This request from sysmem to the securemem driver conveys a physical
16151 /// range to modify and its new base and length, for a heap whose physical
16152 /// range(s) are set up via sysmem.
16153 ///
16154 /// Only sysmem can call this because only sysmem is handed the client end
16155 /// of a FIDL channel serving this protocol, via RegisterSecureMem(). The
16156 /// securemem driver is the server end of this protocol.
16157 ///
16158 /// The securemem driver must configure the range to cover only the new
16159 /// offsets before responding to this message with success.
16160 ///
16161 /// On failure, the securemem driver must ensure the range was not changed.
16162 ///
16163 /// Sysmem must not call this if dynamic_protection_ranges false. Sysmem
16164 /// must not call this if !is_mod_protected_range_available.
16165 ///
16166 /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
16167 /// on various ranges that exist at the time of the call.
16168 ///
16169 /// The range must only be modified at one end or the other, but not both.
16170 /// If the range is getting shorter, and the un-covered blocks are not
16171 /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
16173 /// (bus lockup or similar), so the caller must ensure that no DMA is
16174 /// ongoing to any portion of a range that is getting shorter, unless the
16175 /// blocks being un-covered by the modification to this range are all
16176 /// covered by other active ranges, in which case no disruption to ongoing
16177 /// DMA will occur.
16178 ///
16179 /// If a range is modified to become <= zero length, the range is deleted.
16180 ///
16181 /// Errors:
16182 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16183 /// Unexpected heap, or old_range or new_range that doesn't conform to
16184 /// protected_range_granularity, or old_range and new_range differ in
16185 /// both begin and end (disallowed).
16186 /// * UNSPECIFIED - generic internal error (such as in communication
16187 /// with TEE which doesn't generate zx_status_t errors).
16188 /// * NOT_FOUND - the specified range is not found.
16189 /// * other errors are possible, such as from communication failures or
16190 /// server propagation of failures.
16191 ModifySecureHeapPhysicalRange {
16192 payload: SecureMemModifySecureHeapPhysicalRangeRequest,
16193 responder: SecureMemModifySecureHeapPhysicalRangeResponder,
16194 },
16195 /// Zero a sub-range of a currently-existing physical range added via
16196 /// AddSecureHeapPhysicalRange(). The sub-range must be fully covered by
16197 /// exactly one physical range, and must not overlap with any other
16198 /// physical range.
16199 ///
16200 /// is_covering_range_explicit - When true, the covering range must be one
16201 /// of the ranges explicitly created via AddSecureHeapPhysicalRange(),
16202 /// possibly modified since. When false, the covering range must not
16203 /// be one of the ranges explicitly created via
16204 /// AddSecureHeapPhysicalRange(), but the covering range must exist as
16205 /// a covering range not created via AddSecureHeapPhysicalRange(). The
16206 /// covering range is typically the entire physical range (or a range
16207 /// which covers even more) of a heap configured by the TEE and whose
16208 /// configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
16209 ///
16210 /// Ongoing DMA is not disrupted by this request.
16211 ///
16212 /// Errors:
16213 /// * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
16214 /// Unexpected heap.
16215 /// * UNSPECIFIED - generic internal error (such as in communication
16216 /// with TEE which doesn't generate zx_status_t errors).
16217 /// * other errors are possible, such as from communication failures or
16218 /// server propagation of failures.
16219 ZeroSubRange {
16220 payload: SecureMemZeroSubRangeRequest,
16221 responder: SecureMemZeroSubRangeResponder,
16222 },
16223 /// An interaction was received which does not match any known method.
16224 #[non_exhaustive]
16225 _UnknownMethod {
16226 /// Ordinal of the method that was called.
16227 ordinal: u64,
16228 control_handle: SecureMemControlHandle,
16229 method_type: fidl::MethodType,
16230 },
16231}
16232
16233impl SecureMemRequest {
16234 #[allow(irrefutable_let_patterns)]
16235 pub fn into_get_physical_secure_heaps(
16236 self,
16237 ) -> Option<(SecureMemGetPhysicalSecureHeapsResponder)> {
16238 if let SecureMemRequest::GetPhysicalSecureHeaps { responder } = self {
16239 Some((responder))
16240 } else {
16241 None
16242 }
16243 }
16244
16245 #[allow(irrefutable_let_patterns)]
16246 pub fn into_get_dynamic_secure_heaps(
16247 self,
16248 ) -> Option<(SecureMemGetDynamicSecureHeapsResponder)> {
16249 if let SecureMemRequest::GetDynamicSecureHeaps { responder } = self {
16250 Some((responder))
16251 } else {
16252 None
16253 }
16254 }
16255
16256 #[allow(irrefutable_let_patterns)]
16257 pub fn into_get_physical_secure_heap_properties(
16258 self,
16259 ) -> Option<(
16260 SecureMemGetPhysicalSecureHeapPropertiesRequest,
16261 SecureMemGetPhysicalSecureHeapPropertiesResponder,
16262 )> {
16263 if let SecureMemRequest::GetPhysicalSecureHeapProperties { payload, responder } = self {
16264 Some((payload, responder))
16265 } else {
16266 None
16267 }
16268 }
16269
16270 #[allow(irrefutable_let_patterns)]
16271 pub fn into_add_secure_heap_physical_range(
16272 self,
16273 ) -> Option<(
16274 SecureMemAddSecureHeapPhysicalRangeRequest,
16275 SecureMemAddSecureHeapPhysicalRangeResponder,
16276 )> {
16277 if let SecureMemRequest::AddSecureHeapPhysicalRange { payload, responder } = self {
16278 Some((payload, responder))
16279 } else {
16280 None
16281 }
16282 }
16283
16284 #[allow(irrefutable_let_patterns)]
16285 pub fn into_delete_secure_heap_physical_range(
16286 self,
16287 ) -> Option<(
16288 SecureMemDeleteSecureHeapPhysicalRangeRequest,
16289 SecureMemDeleteSecureHeapPhysicalRangeResponder,
16290 )> {
16291 if let SecureMemRequest::DeleteSecureHeapPhysicalRange { payload, responder } = self {
16292 Some((payload, responder))
16293 } else {
16294 None
16295 }
16296 }
16297
16298 #[allow(irrefutable_let_patterns)]
16299 pub fn into_modify_secure_heap_physical_range(
16300 self,
16301 ) -> Option<(
16302 SecureMemModifySecureHeapPhysicalRangeRequest,
16303 SecureMemModifySecureHeapPhysicalRangeResponder,
16304 )> {
16305 if let SecureMemRequest::ModifySecureHeapPhysicalRange { payload, responder } = self {
16306 Some((payload, responder))
16307 } else {
16308 None
16309 }
16310 }
16311
16312 #[allow(irrefutable_let_patterns)]
16313 pub fn into_zero_sub_range(
16314 self,
16315 ) -> Option<(SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResponder)> {
16316 if let SecureMemRequest::ZeroSubRange { payload, responder } = self {
16317 Some((payload, responder))
16318 } else {
16319 None
16320 }
16321 }
16322
16323 /// Name of the method defined in FIDL
16324 pub fn method_name(&self) -> &'static str {
16325 match *self {
16326 SecureMemRequest::GetPhysicalSecureHeaps { .. } => "get_physical_secure_heaps",
16327 SecureMemRequest::GetDynamicSecureHeaps { .. } => "get_dynamic_secure_heaps",
16328 SecureMemRequest::GetPhysicalSecureHeapProperties { .. } => {
16329 "get_physical_secure_heap_properties"
16330 }
16331 SecureMemRequest::AddSecureHeapPhysicalRange { .. } => "add_secure_heap_physical_range",
16332 SecureMemRequest::DeleteSecureHeapPhysicalRange { .. } => {
16333 "delete_secure_heap_physical_range"
16334 }
16335 SecureMemRequest::ModifySecureHeapPhysicalRange { .. } => {
16336 "modify_secure_heap_physical_range"
16337 }
16338 SecureMemRequest::ZeroSubRange { .. } => "zero_sub_range",
16339 SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
16340 "unknown one-way method"
16341 }
16342 SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
16343 "unknown two-way method"
16344 }
16345 }
16346 }
16347}
16348
/// Cheaply cloneable handle to the channel serving the `SecureMem` protocol,
/// used to manage shutdown and signal the peer.
#[derive(Debug, Clone)]
pub struct SecureMemControlHandle {
    // Shared server state; cloning the handle only bumps the Arc refcount.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
16353
impl fidl::endpoints::ControlHandle for SecureMemControlHandle {
    /// Shuts down the serving channel without sending an epitaph.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    /// Shuts down the serving channel after sending `status` as the epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    /// Reports whether the underlying channel is already closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    /// Returns a signal source that resolves when the channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    /// Asserts/clears user signals on the peer end of the channel
    /// (only available when targeting Fuchsia).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
16379
16380impl SecureMemControlHandle {}
16381
/// Responder for one `SecureMem.GetPhysicalSecureHeaps` transaction; exactly one
/// reply must be sent via `send` / `send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapsResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle without
    // running this struct's `Drop` (which would shut the channel down).
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
16388
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16399
impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and WITHOUT shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16414
impl SecureMemGetPhysicalSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the result as a flexible (open-interaction) response and writes
    // it on the channel under this transaction's id.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x38716300592073e3, // method ordinal of SecureMem.GetPhysicalSecureHeaps
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16456
/// Responder for one `SecureMem.GetDynamicSecureHeaps` transaction; exactly one
/// reply must be sent via `send` / `send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetDynamicSecureHeapsResponder {
    // ManuallyDrop enables dropping the handle without running `Drop`.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
16463
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetDynamicSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16474
impl fidl::endpoints::Responder for SecureMemGetDynamicSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and WITHOUT shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16489
impl SecureMemGetDynamicSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the result as a flexible (open-interaction) response and writes
    // it on the channel under this transaction's id.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetDynamicSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x1190847f99952834, // method ordinal of SecureMem.GetDynamicSecureHeaps
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16531
/// Responder for one `SecureMem.GetPhysicalSecureHeapProperties` transaction;
/// exactly one reply must be sent via `send` / `send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapPropertiesResponder {
    // ManuallyDrop enables dropping the handle without running `Drop`.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
16538
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16549
impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and WITHOUT shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16564
impl SecureMemGetPhysicalSecureHeapPropertiesResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the result as a flexible (open-interaction) response and writes
    // it on the channel under this transaction's id.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapPropertiesResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0xc6f06889009c7bc, // method ordinal of SecureMem.GetPhysicalSecureHeapProperties
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16606
/// Responder for one `SecureMem.AddSecureHeapPhysicalRange` transaction; exactly
/// one reply must be sent via `send` / `send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemAddSecureHeapPhysicalRangeResponder {
    // ManuallyDrop enables dropping the handle without running `Drop`.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
16613
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemAddSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16624
impl fidl::endpoints::Responder for SecureMemAddSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and WITHOUT shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16639
impl SecureMemAddSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the (empty-success) result as a flexible response and writes it
    // on the channel under this transaction's id.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x35f695b9b6c7217a, // method ordinal of SecureMem.AddSecureHeapPhysicalRange
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16672
/// Responder for one `SecureMem.DeleteSecureHeapPhysicalRange` transaction;
/// exactly one reply must be sent via `send` / `send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemDeleteSecureHeapPhysicalRangeResponder {
    // ManuallyDrop enables dropping the handle without running `Drop`.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
16679
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16690
impl fidl::endpoints::Responder for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and WITHOUT shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16705
impl SecureMemDeleteSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the (empty-success) result as a flexible response and writes it
    // on the channel under this transaction's id.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0xeaa58c650264c9e, // method ordinal of SecureMem.DeleteSecureHeapPhysicalRange
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16738
/// Responder for one `SecureMem.ModifySecureHeapPhysicalRange` transaction;
/// exactly one reply must be sent via `send` / `send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemModifySecureHeapPhysicalRangeResponder {
    // ManuallyDrop enables dropping the handle without running `Drop`.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
16745
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemModifySecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16756
impl fidl::endpoints::Responder for SecureMemModifySecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and WITHOUT shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16771
impl SecureMemModifySecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the (empty-success) result as a flexible response and writes it
    // on the channel under this transaction's id.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x60b7448aa1187734, // method ordinal of SecureMem.ModifySecureHeapPhysicalRange
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16804
/// Responder for one `SecureMem.ZeroSubRange` transaction; exactly one reply
/// must be sent via `send` / `send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemZeroSubRangeResponder {
    // ManuallyDrop enables dropping the handle without running `Drop`.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id echoed back in the response message header.
    tx_id: u32,
}
16811
/// Set the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemZeroSubRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
16822
impl fidl::endpoints::Responder for SecureMemZeroSubRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    /// Returns the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without replying and WITHOUT shutting down the
    /// channel (unlike `Drop`, which shuts it down).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
16837
impl SecureMemZeroSubRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    // Encodes the (empty-success) result as a flexible response and writes it
    // on the channel under this transaction's id.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x5b25b7901a385ce5, // method ordinal of SecureMem.ZeroSubRange
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16870
16871mod internal {
16872 use super::*;
16873
16874 impl AllocatorAllocateNonSharedCollectionRequest {
16875 #[inline(always)]
16876 fn max_ordinal_present(&self) -> u64 {
16877 if let Some(_) = self.collection_request {
16878 return 1;
16879 }
16880 0
16881 }
16882 }
16883
    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Borrowed<'a> = &'a mut Self;
        /// Resource types are encoded from a mutable borrow so contained
        /// handles can be moved out of the value during encoding.
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
16892
    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Owned = Self;

        /// FIDL tables are 8-byte aligned on the wire.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        /// The inline portion of a table is a 16-byte vector header
        /// (max ordinal count + presence/data pointer).
        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
16906
    // Wire encoder for the table: writes the vector header inline, then one
    // 8-byte envelope per ordinal out-of-line. Takes `&mut` so the server-end
    // handle in `collection_request` can be moved into the message.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateNonSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateNonSharedCollectionRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateNonSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
16962
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes this table from its wire form: a vector header (ordinal
    /// count + presence word) followed by one 8-byte envelope per ordinal.
    /// Ordinal 1 (`collection_request`) is the only known field; envelopes
    /// for any other ordinal are skipped as unknown.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as non-nullable vectors of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot out-of-line/handle cursors so the envelope's declared
        // byte and handle counts can be validated after the member decode.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.collection_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17068
17069 impl AllocatorAllocateSharedCollectionRequest {
17070 #[inline(always)]
17071 fn max_ordinal_present(&self) -> u64 {
17072 if let Some(_) = self.token_request {
17073 return 1;
17074 }
17075 0
17076 }
17077 }
17078
17079 impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateSharedCollectionRequest {
17080 type Borrowed<'a> = &'a mut Self;
17081 fn take_or_borrow<'a>(
17082 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17083 ) -> Self::Borrowed<'a> {
17084 value
17085 }
17086 }
17087
17088 unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateSharedCollectionRequest {
17089 type Owned = Self;
17090
17091 #[inline(always)]
17092 fn inline_align(_context: fidl::encoding::Context) -> usize {
17093 8
17094 }
17095
17096 #[inline(always)]
17097 fn inline_size(_context: fidl::encoding::Context) -> usize {
17098 16
17099 }
17100 }
17101
unsafe impl
    fidl::encoding::Encode<
        AllocatorAllocateSharedCollectionRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorAllocateSharedCollectionRequest
{
    /// Encodes the table as a 16-byte vector header followed by one 8-byte
    /// envelope per ordinal up to the highest present field. Takes the
    /// `token_request` server end out of `self` (hence `&mut`).
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorAllocateSharedCollectionRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // take_or_borrow moves the channel handle out of the table so it is
        // transferred, not duplicated.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17166
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes the table's vector-of-envelopes wire form. Ordinal 1
    /// (`token_request`) is the only known field; other envelopes are
    /// consumed as unknown.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as non-nullable vectors of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors to validate the envelope's declared byte/handle
        // counts after decoding the member.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17274
17275 impl AllocatorBindSharedCollectionRequest {
17276 #[inline(always)]
17277 fn max_ordinal_present(&self) -> u64 {
17278 if let Some(_) = self.buffer_collection_request {
17279 return 2;
17280 }
17281 if let Some(_) = self.token {
17282 return 1;
17283 }
17284 0
17285 }
17286 }
17287
17288 impl fidl::encoding::ResourceTypeMarker for AllocatorBindSharedCollectionRequest {
17289 type Borrowed<'a> = &'a mut Self;
17290 fn take_or_borrow<'a>(
17291 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17292 ) -> Self::Borrowed<'a> {
17293 value
17294 }
17295 }
17296
17297 unsafe impl fidl::encoding::TypeMarker for AllocatorBindSharedCollectionRequest {
17298 type Owned = Self;
17299
17300 #[inline(always)]
17301 fn inline_align(_context: fidl::encoding::Context) -> usize {
17302 8
17303 }
17304
17305 #[inline(always)]
17306 fn inline_size(_context: fidl::encoding::Context) -> usize {
17307 16
17308 }
17309 }
17310
unsafe impl
    fidl::encoding::Encode<
        AllocatorBindSharedCollectionRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorBindSharedCollectionRequest
{
    /// Encodes the table as a 16-byte vector header followed by one 8-byte
    /// envelope per ordinal: 1 = `token` (client end), 2 =
    /// `buffer_collection_request` (server end). Handles are moved out of
    /// `self` during encoding.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorBindSharedCollectionRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffer_collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17396
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes the table's vector-of-envelopes wire form. Known ordinals:
    /// 1 = `token`, 2 = `buffer_collection_request`; any others are
    /// consumed as unknown envelopes.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as non-nullable vectors of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `token`. Snapshot cursors to validate the envelope's
        // declared byte/handle counts after decoding.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: `buffer_collection_request`.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17564
17565 impl AllocatorGetVmoInfoRequest {
17566 #[inline(always)]
17567 fn max_ordinal_present(&self) -> u64 {
17568 if let Some(_) = self.vmo {
17569 return 1;
17570 }
17571 0
17572 }
17573 }
17574
17575 impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoRequest {
17576 type Borrowed<'a> = &'a mut Self;
17577 fn take_or_borrow<'a>(
17578 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17579 ) -> Self::Borrowed<'a> {
17580 value
17581 }
17582 }
17583
17584 unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoRequest {
17585 type Owned = Self;
17586
17587 #[inline(always)]
17588 fn inline_align(_context: fidl::encoding::Context) -> usize {
17589 8
17590 }
17591
17592 #[inline(always)]
17593 fn inline_size(_context: fidl::encoding::Context) -> usize {
17594 16
17595 }
17596 }
17597
unsafe impl
    fidl::encoding::Encode<
        AllocatorGetVmoInfoRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorGetVmoInfoRequest
{
    /// Encodes the table as a 16-byte vector header followed by one 8-byte
    /// envelope per ordinal. Ordinal 1 is the `vmo` handle, which is moved
    /// out of `self` (ownership transfers to the message).
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorGetVmoInfoRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // 2147483648 == 0x8000_0000 is the generated rights constant for
        // this handle type.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17668
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    /// Decodes the table's vector-of-envelopes wire form. Ordinal 1
    /// (`vmo`) is the only known field; other envelopes are consumed as
    /// unknown.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as non-nullable vectors of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors to validate the envelope's declared byte/handle
        // counts after decoding the member.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            // 2147483648 == 0x8000_0000 is the generated rights constant
            // for this handle type.
            let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
17763
17764 impl AllocatorGetVmoInfoResponse {
17765 #[inline(always)]
17766 fn max_ordinal_present(&self) -> u64 {
17767 if let Some(_) = self.close_weak_asap {
17768 return 3;
17769 }
17770 if let Some(_) = self.buffer_index {
17771 return 2;
17772 }
17773 if let Some(_) = self.buffer_collection_id {
17774 return 1;
17775 }
17776 0
17777 }
17778 }
17779
17780 impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoResponse {
17781 type Borrowed<'a> = &'a mut Self;
17782 fn take_or_borrow<'a>(
17783 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
17784 ) -> Self::Borrowed<'a> {
17785 value
17786 }
17787 }
17788
17789 unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoResponse {
17790 type Owned = Self;
17791
17792 #[inline(always)]
17793 fn inline_align(_context: fidl::encoding::Context) -> usize {
17794 8
17795 }
17796
17797 #[inline(always)]
17798 fn inline_size(_context: fidl::encoding::Context) -> usize {
17799 16
17800 }
17801 }
17802
unsafe impl
    fidl::encoding::Encode<
        AllocatorGetVmoInfoResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut AllocatorGetVmoInfoResponse
{
    /// Encodes the table as a 16-byte vector header followed by one 8-byte
    /// envelope per ordinal: 1 = `buffer_collection_id` (u64, borrowed),
    /// 2 = `buffer_index` (u64, borrowed), 3 = `close_weak_asap` (event
    /// pair handle, moved out of `self`).
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<AllocatorGetVmoInfoResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_id
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_index.as_ref().map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // 2147483648 == 0x8000_0000 is the generated rights constant for
        // this handle type.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.close_weak_asap.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
17927
// Wire-format decoder for the `AllocatorGetVmoInfoResponse` table.
// A FIDL table is encoded as a vector header (max ordinal + presence word)
// followed by one 8-byte envelope per ordinal. Members are walked in
// ordinal order: 1 = buffer_collection_id (u64), 2 = buffer_index (u64),
// 3 = close_weak_asap (eventpair handle). Envelopes past the encoded max
// ordinal are simply absent; unknown trailing envelopes are skipped.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are encoded as non-nullable vectors of envelopes; a null
        // vector header is a protocol violation.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: buffer_collection_id (u64, inlined in the envelope is
        // false since u64 > 4 bytes).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot out-of-line cursor and handle count so the envelope's
        // declared num_bytes/num_handles can be validated after decoding.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope header's byte/handle counts must match what the
            // member decode actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        // Ordinal 2: buffer_index (u64).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_index.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        // Ordinal 3: close_weak_asap (zx eventpair handle; handle rights
        // mask 2147483648 == ZX_RIGHT_SAME_RIGHTS per generated constant).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18126
18127 impl BufferCollectionAttachLifetimeTrackingRequest {
18128 #[inline(always)]
18129 fn max_ordinal_present(&self) -> u64 {
18130 if let Some(_) = self.buffers_remaining {
18131 return 2;
18132 }
18133 if let Some(_) = self.server_end {
18134 return 1;
18135 }
18136 0
18137 }
18138 }
18139
18140 impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
18141 type Borrowed<'a> = &'a mut Self;
18142 fn take_or_borrow<'a>(
18143 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18144 ) -> Self::Borrowed<'a> {
18145 value
18146 }
18147 }
18148
18149 unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
18150 type Owned = Self;
18151
18152 #[inline(always)]
18153 fn inline_align(_context: fidl::encoding::Context) -> usize {
18154 8
18155 }
18156
18157 #[inline(always)]
18158 fn inline_size(_context: fidl::encoding::Context) -> usize {
18159 16
18160 }
18161 }
18162
// Wire-format encoder for the `BufferCollectionAttachLifetimeTrackingRequest`
// table: writes the vector header, then one envelope per present ordinal
// (1 = server_end, an eventpair handle taken out of `self`;
// 2 = buffers_remaining, a u32 encoded by reference).
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionAttachLifetimeTrackingRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionAttachLifetimeTrackingRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionAttachLifetimeTrackingRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: server_end — a resource member, so the handle is
        // moved out of `self` via take_or_borrow.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.server_end.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 2: buffers_remaining — a plain value member, encoded by
        // shared reference.
        fidl::encoding::encode_in_envelope_optional::<
            u32,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffers_remaining
                .as_ref()
                .map(<u32 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18261
// Wire-format decoder for the `BufferCollectionAttachLifetimeTrackingRequest`
// table. Members are walked in ordinal order: 1 = server_end (eventpair
// handle), 2 = buffers_remaining (u32). Unknown trailing envelopes are
// skipped for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are non-nullable vectors of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: server_end (eventpair handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors so the envelope's declared byte/handle counts
        // can be validated against what the member decode consumed.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        // Ordinal 2: buffers_remaining (u32, inlined in the envelope since
        // it fits in 4 bytes).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u32 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffers_remaining.get_or_insert_with(|| {
                fidl::new_empty!(u32, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u32,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18408
18409 impl BufferCollectionAttachTokenRequest {
18410 #[inline(always)]
18411 fn max_ordinal_present(&self) -> u64 {
18412 if let Some(_) = self.token_request {
18413 return 2;
18414 }
18415 if let Some(_) = self.rights_attenuation_mask {
18416 return 1;
18417 }
18418 0
18419 }
18420 }
18421
18422 impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachTokenRequest {
18423 type Borrowed<'a> = &'a mut Self;
18424 fn take_or_borrow<'a>(
18425 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18426 ) -> Self::Borrowed<'a> {
18427 value
18428 }
18429 }
18430
18431 unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachTokenRequest {
18432 type Owned = Self;
18433
18434 #[inline(always)]
18435 fn inline_align(_context: fidl::encoding::Context) -> usize {
18436 8
18437 }
18438
18439 #[inline(always)]
18440 fn inline_size(_context: fidl::encoding::Context) -> usize {
18441 16
18442 }
18443 }
18444
// Wire-format encoder for the `BufferCollectionAttachTokenRequest` table:
// writes the vector header, then one envelope per present ordinal
// (1 = rights_attenuation_mask, a value member encoded by reference;
// 2 = token_request, a server-end channel moved out of `self`).
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionAttachTokenRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionAttachTokenRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionAttachTokenRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: rights_attenuation_mask — a plain value member,
        // encoded by shared reference.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 2: token_request — a resource member, so the endpoint is
        // moved out of `self` via take_or_borrow.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18537
// Wire-format decoder for the `BufferCollectionAttachTokenRequest` table.
// Members are walked in ordinal order: 1 = rights_attenuation_mask
// (fidl::Rights bits), 2 = token_request (server end of a
// BufferCollectionToken channel). Unknown trailing envelopes are skipped
// for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // Tables are non-nullable vectors of envelopes.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: rights_attenuation_mask (fidl::Rights).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors so the envelope's declared byte/handle counts
        // can be validated against what the member decode consumed.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        // Ordinal 2: token_request (ServerEnd<BufferCollectionTokenMarker>).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
18697
18698 impl BufferCollectionInfo {
18699 #[inline(always)]
18700 fn max_ordinal_present(&self) -> u64 {
18701 if let Some(_) = self.buffer_collection_id {
18702 return 3;
18703 }
18704 if let Some(_) = self.buffers {
18705 return 2;
18706 }
18707 if let Some(_) = self.settings {
18708 return 1;
18709 }
18710 0
18711 }
18712 }
18713
18714 impl fidl::encoding::ResourceTypeMarker for BufferCollectionInfo {
18715 type Borrowed<'a> = &'a mut Self;
18716 fn take_or_borrow<'a>(
18717 value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18718 ) -> Self::Borrowed<'a> {
18719 value
18720 }
18721 }
18722
18723 unsafe impl fidl::encoding::TypeMarker for BufferCollectionInfo {
18724 type Owned = Self;
18725
18726 #[inline(always)]
18727 fn inline_align(_context: fidl::encoding::Context) -> usize {
18728 8
18729 }
18730
18731 #[inline(always)]
18732 fn inline_size(_context: fidl::encoding::Context) -> usize {
18733 16
18734 }
18735 }
18736
// Wire-format encoder for the `BufferCollectionInfo` table: writes the
// vector header, then one envelope per present ordinal
// (1 = settings, a value member; 2 = buffers, a vector of VmoBuffer
// resources moved out of `self`; 3 = buffer_collection_id, a u64).
unsafe impl
    fidl::encoding::Encode<BufferCollectionInfo, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut BufferCollectionInfo
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionInfo>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 1: settings — a plain value member, encoded by shared
        // reference.
        fidl::encoding::encode_in_envelope_optional::<
            SingleBufferSettings,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.settings
                .as_ref()
                .map(<SingleBufferSettings as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 2: buffers — a resource vector (max 128 elements), so the
        // VmoBuffers are moved out of `self` via take_or_borrow.
        fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffers.as_mut().map(<fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        // Ordinal 3: buffer_collection_id — a plain value member, encoded
        // by shared reference.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_id
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
18846
// Decodes `BufferCollectionInfo` from its FIDL table wire form: a 16-byte
// vector header (max ordinal + presence marker) followed by an out-of-line
// run of 8-byte envelopes, one per ordinal. Envelopes for ordinals this
// binding does not know are skipped, preserving forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionInfo
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The header's length field is the highest ordinal the sender encoded.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `settings` (SingleBufferSettings).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <SingleBufferSettings as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.settings.get_or_insert_with(|| {
                fidl::new_empty!(
                    SingleBufferSettings,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                SingleBufferSettings,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope's recorded byte/handle counts must match what
            // decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        // Ordinal 2: `buffers` (vector of at most 128 VmoBuffer).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.buffers.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        // Ordinal 3: `buffer_collection_id` (u64).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19044
19045 impl BufferCollectionSetConstraintsRequest {
19046 #[inline(always)]
19047 fn max_ordinal_present(&self) -> u64 {
19048 if let Some(_) = self.constraints {
19049 return 1;
19050 }
19051 0
19052 }
19053 }
19054
impl fidl::encoding::ResourceTypeMarker for BufferCollectionSetConstraintsRequest {
    type Borrowed<'a> = &'a mut Self;
    // Identity borrow: resource types are encoded through a mutable
    // reference so the encoder can move resources out of `value` in place.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19063
// Wire layout of this FIDL table: 8-byte alignment, 16 bytes inline
// (the vector header that precedes the out-of-line envelope array).
unsafe impl fidl::encoding::TypeMarker for BufferCollectionSetConstraintsRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19077
// Encodes this table as a vector header (max present ordinal + presence
// marker) followed by one 8-byte envelope per ordinal up to the highest
// present field; trailing absent fields are omitted entirely.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionSetConstraintsRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionSetConstraintsRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionSetConstraintsRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `constraints`.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            BufferCollectionConstraints,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.constraints
                .as_ref()
                .map(<BufferCollectionConstraints as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19140
// Decodes `BufferCollectionSetConstraintsRequest` from its table wire form
// (vector header + 8-byte envelopes). Unknown ordinals are skipped so
// newer peers remain compatible.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `constraints` (BufferCollectionConstraints).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <BufferCollectionConstraints as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.constraints.get_or_insert_with(|| {
                fidl::new_empty!(
                    BufferCollectionConstraints,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                BufferCollectionConstraints,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope's recorded byte/handle counts must match what
            // decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19243
19244 impl BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
19245 #[inline(always)]
19246 fn max_ordinal_present(&self) -> u64 {
19247 if let Some(_) = self.group_request {
19248 return 1;
19249 }
19250 0
19251 }
19252 }
19253
impl fidl::encoding::ResourceTypeMarker
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    type Borrowed<'a> = &'a mut Self;
    // Identity borrow: resource types are encoded through a mutable
    // reference so the encoder can move resources out of `value` in place.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19264
// Wire layout of this FIDL table: 8-byte alignment, 16 bytes inline
// (the vector header that precedes the out-of-line envelope array).
unsafe impl fidl::encoding::TypeMarker
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19280
// Encodes this table as a vector header followed by one 8-byte envelope
// per ordinal up to the highest present field. The server end handle in
// `group_request` is taken out of `self` during encoding.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
                offset,
            );
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `group_request`.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.group_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19350
// Decodes this table from its wire form (vector header + 8-byte
// envelopes). Unknown ordinals are skipped so newer peers remain
// compatible.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `group_request` (server end handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.group_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope's recorded byte/handle counts must match what
            // decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19458
19459 impl BufferCollectionTokenDuplicateRequest {
19460 #[inline(always)]
19461 fn max_ordinal_present(&self) -> u64 {
19462 if let Some(_) = self.token_request {
19463 return 2;
19464 }
19465 if let Some(_) = self.rights_attenuation_mask {
19466 return 1;
19467 }
19468 0
19469 }
19470 }
19471
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateRequest {
    type Borrowed<'a> = &'a mut Self;
    // Identity borrow: resource types are encoded through a mutable
    // reference so the encoder can move resources out of `value` in place.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19480
// Wire layout of this FIDL table: 8-byte alignment, 16 bytes inline
// (the vector header that precedes the out-of-line envelope array).
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19494
// Encodes this table as a vector header followed by one 8-byte envelope
// per ordinal up to the highest present field. The server end handle in
// `token_request` is taken out of `self` during encoding.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenDuplicateRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenDuplicateRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenDuplicateRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `rights_attenuation_mask`.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 2: `token_request`.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19587
// Decodes this table from its wire form (vector header + 8-byte
// envelopes). Unknown ordinals are skipped so newer peers remain
// compatible.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `rights_attenuation_mask` (fidl::Rights).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // Payloads of <= 4 bytes are stored inline in the envelope itself.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // The envelope's recorded byte/handle counts must match what
            // decoding actually consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        // Ordinal 2: `token_request` (server end handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
19747
19748 impl BufferCollectionTokenGroupCreateChildRequest {
19749 #[inline(always)]
19750 fn max_ordinal_present(&self) -> u64 {
19751 if let Some(_) = self.rights_attenuation_mask {
19752 return 2;
19753 }
19754 if let Some(_) = self.token_request {
19755 return 1;
19756 }
19757 0
19758 }
19759 }
19760
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildRequest {
    type Borrowed<'a> = &'a mut Self;
    // Identity borrow: resource types are encoded through a mutable
    // reference so the encoder can move resources out of `value` in place.
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
19769
// Wire layout of this FIDL table: 8-byte alignment, 16 bytes inline
// (the vector header that precedes the out-of-line envelope array).
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
19783
// Encodes this table as a vector header followed by one 8-byte envelope
// per ordinal up to the highest present field. The server end handle in
// `token_request` is taken out of `self` during encoding.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenGroupCreateChildRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenGroupCreateChildRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenGroupCreateChildRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `token_request`.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.token_request.as_mut().map(
                <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 2: `rights_attenuation_mask`.
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::Rights,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.rights_attenuation_mask
                .as_ref()
                .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
19876
// Wire decoding for the `BufferCollectionTokenGroupCreateChildRequest` FIDL
// table. A table is encoded as a vector of 8-byte envelopes, one per ordinal;
// members absent on the wire are left as `None` in `self`.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety (trait contract): `offset` must point at an in-bounds, previously
    // validated inline slot for `Self` within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table header is a vector header; a null vector is not a valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `token_request`.
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot out-of-line cursor and handle count so the envelope's
        // declared byte/handle budget can be verified after decoding.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Endpoint<
                fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Values of <= 4 bytes are stored inline in the envelope (RFC-0113).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.token_request.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        // Ordinal 2: `rights_attenuation_mask`.
        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes (ordinals newer than this
        // binding knows about), preserving forward compatibility.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20036
20037 impl BufferCollectionTokenGroupCreateChildrenSyncResponse {
20038 #[inline(always)]
20039 fn max_ordinal_present(&self) -> u64 {
20040 if let Some(_) = self.tokens {
20041 return 1;
20042 }
20043 0
20044 }
20045 }
20046
// Marks this table as a resource type: it transports handles, so encoding
// borrows `&mut Self` and moves the handles out of the value being encoded.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        // Tables are encoded through the mutable reference itself; individual
        // members are taken out during `encode`.
        value
    }
}
20055
// Wire-format metadata: a FIDL table occupies a 16-byte, 8-byte-aligned
// inline slot (the vector header: element count + presence marker); all
// member content lives out of line.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
20069
// Wire encoding for the `BufferCollectionTokenGroupCreateChildrenSyncResponse`
// FIDL table: writes the vector header, then one 8-byte envelope per ordinal
// up to the highest present member, taking handle-bearing members out of
// `self` as it goes.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenGroupCreateChildrenSyncResponse
{
    // Safety (trait contract): `offset` must point at an in-bounds,
    // correctly aligned 16-byte inline slot in the encoder's buffer.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionTokenGroupCreateChildrenSyncResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `tokens`.
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.tokens.as_mut().map(
                <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20143
// Wire decoding for the `BufferCollectionTokenGroupCreateChildrenSyncResponse`
// FIDL table; see the encode impl above for the envelope layout. Unknown
// ordinals are skipped to preserve forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety (trait contract): `offset` must point at an in-bounds, previously
    // validated inline slot for `Self` within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table header is a vector header; a null vector is not a valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `tokens`.
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors so the envelope's declared byte/handle budget can
        // be verified after decoding the member.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Values of <= 4 bytes are stored inline in the envelope (RFC-0113).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.tokens.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20260
20261 impl BufferCollectionTokenDuplicateSyncResponse {
20262 #[inline(always)]
20263 fn max_ordinal_present(&self) -> u64 {
20264 if let Some(_) = self.tokens {
20265 return 1;
20266 }
20267 0
20268 }
20269 }
20270
// Marks this table as a resource type: it transports handles, so encoding
// borrows `&mut Self` and moves the handles out of the value being encoded.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateSyncResponse {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        // Tables are encoded through the mutable reference itself; individual
        // members are taken out during `encode`.
        value
    }
}
20279
// Wire-format metadata: a FIDL table occupies a 16-byte, 8-byte-aligned
// inline slot (the vector header); all member content lives out of line.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateSyncResponse {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
20293
// Wire encoding for the `BufferCollectionTokenDuplicateSyncResponse` FIDL
// table: writes the vector header, then one 8-byte envelope per ordinal up
// to the highest present member, taking handle-bearing members out of `self`.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionTokenDuplicateSyncResponse
{
    // Safety (trait contract): `offset` must point at an in-bounds,
    // correctly aligned 16-byte inline slot in the encoder's buffer.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<BufferCollectionTokenDuplicateSyncResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `tokens`.
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.tokens.as_mut().map(
                <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20366
// Wire decoding for the `BufferCollectionTokenDuplicateSyncResponse` FIDL
// table; see the encode impl above for the envelope layout. Unknown ordinals
// are skipped to preserve forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety (trait contract): `offset` must point at an in-bounds, previously
    // validated inline slot for `Self` within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table header is a vector header; a null vector is not a valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `tokens`.
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors so the envelope's declared byte/handle budget can
        // be verified after decoding the member.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::Vector<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                >,
                64,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Values of <= 4 bytes are stored inline in the envelope (RFC-0113).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.tokens.get_or_insert_with(|| {
                fidl::new_empty!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20483
20484 impl BufferCollectionWaitForAllBuffersAllocatedResponse {
20485 #[inline(always)]
20486 fn max_ordinal_present(&self) -> u64 {
20487 if let Some(_) = self.buffer_collection_info {
20488 return 1;
20489 }
20490 0
20491 }
20492 }
20493
// Marks this table as a resource type: it transports handles, so encoding
// borrows `&mut Self` and moves the handles out of the value being encoded.
impl fidl::encoding::ResourceTypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        // Tables are encoded through the mutable reference itself; individual
        // members are taken out during `encode`.
        value
    }
}
20502
// Wire-format metadata: a FIDL table occupies a 16-byte, 8-byte-aligned
// inline slot (the vector header); all member content lives out of line.
unsafe impl fidl::encoding::TypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
20516
// Wire encoding for the `BufferCollectionWaitForAllBuffersAllocatedResponse`
// FIDL table: writes the vector header, then one 8-byte envelope per ordinal
// up to the highest present member, taking the handle-bearing
// `buffer_collection_info` member out of `self`.
unsafe impl
    fidl::encoding::Encode<
        BufferCollectionWaitForAllBuffersAllocatedResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut BufferCollectionWaitForAllBuffersAllocatedResponse
{
    // Safety (trait contract): `offset` must point at an in-bounds,
    // correctly aligned 16-byte inline slot in the encoder's buffer.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder
            .debug_check_bounds::<BufferCollectionWaitForAllBuffersAllocatedResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `buffer_collection_info`.
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            BufferCollectionInfo,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.buffer_collection_info.as_mut().map(
                <BufferCollectionInfo as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20580
// Wire decoding for the `BufferCollectionWaitForAllBuffersAllocatedResponse`
// FIDL table; see the encode impl above for the envelope layout. Unknown
// ordinals are skipped to preserve forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety (trait contract): `offset` must point at an in-bounds, previously
    // validated inline slot for `Self` within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table header is a vector header; a null vector is not a valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `buffer_collection_info`.
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors so the envelope's declared byte/handle budget can
        // be verified after decoding the member.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <BufferCollectionInfo as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context,
                );
            // Values of <= 4 bytes are stored inline in the envelope (RFC-0113).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.buffer_collection_info.get_or_insert_with(|| {
                fidl::new_empty!(
                    BufferCollectionInfo,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                )
            });
            fidl::decode!(
                BufferCollectionInfo,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20683
20684 impl NodeAttachNodeTrackingRequest {
20685 #[inline(always)]
20686 fn max_ordinal_present(&self) -> u64 {
20687 if let Some(_) = self.server_end {
20688 return 1;
20689 }
20690 0
20691 }
20692 }
20693
// Marks this table as a resource type: it transports a handle, so encoding
// borrows `&mut Self` and moves the handle out of the value being encoded.
impl fidl::encoding::ResourceTypeMarker for NodeAttachNodeTrackingRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        // Tables are encoded through the mutable reference itself; individual
        // members are taken out during `encode`.
        value
    }
}
20702
// Wire-format metadata: a FIDL table occupies a 16-byte, 8-byte-aligned
// inline slot (the vector header); all member content lives out of line.
unsafe impl fidl::encoding::TypeMarker for NodeAttachNodeTrackingRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
20716
// Wire encoding for the `NodeAttachNodeTrackingRequest` FIDL table: writes
// the vector header, then one 8-byte envelope per ordinal up to the highest
// present member, moving the `server_end` eventpair handle out of `self`.
unsafe impl
    fidl::encoding::Encode<
        NodeAttachNodeTrackingRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeAttachNodeTrackingRequest
{
    // Safety (trait contract): `offset` must point at an in-bounds,
    // correctly aligned 16-byte inline slot in the encoder's buffer.
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeAttachNodeTrackingRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        // Ordinal 1: `server_end`. The handle type carries the full rights
        // mask (2147483648 == ZX_RIGHT_SAME_RIGHTS).
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        //   envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.server_end.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20787
// Wire decoding for the `NodeAttachNodeTrackingRequest` FIDL table; see the
// encode impl above for the envelope layout. Unknown ordinals are skipped to
// preserve forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    // Safety (trait contract): `offset` must point at an in-bounds, previously
    // validated inline slot for `Self` within the decoder's buffer.
    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table header is a vector header; a null vector is not a valid table.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        // Ordinal 1: `server_end` (eventpair handle).
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Snapshot cursors so the envelope's declared byte/handle budget can
        // be verified after decoding the member.
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Values of <= 4 bytes are stored inline in the envelope (RFC-0113).
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
20882
20883 impl NodeIsAlternateForRequest {
20884 #[inline(always)]
20885 fn max_ordinal_present(&self) -> u64 {
20886 if let Some(_) = self.node_ref {
20887 return 1;
20888 }
20889 0
20890 }
20891 }
20892
// Resource type (contains handles), so encoding borrows the value mutably;
// `take_or_borrow` is a pass-through of the mutable reference.
impl fidl::encoding::ResourceTypeMarker for NodeIsAlternateForRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
20901
// Wire-format metadata: the inline part of a table is a 16-byte vector
// header (count + presence word), aligned to 8 bytes.
unsafe impl fidl::encoding::TypeMarker for NodeIsAlternateForRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
20915
// Machine-generated FIDL table encoder for `NodeIsAlternateForRequest`.
// Writes the vector header, then one 8-byte envelope per ordinal up to the
// highest field that is present. Takes `&mut self` so handle fields can be
// moved out during encoding.
unsafe impl
    fidl::encoding::Encode<
        NodeIsAlternateForRequest,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeIsAlternateForRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeIsAlternateForRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `node_ref` (event handle envelope).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.node_ref.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
20986
// Machine-generated FIDL table decoder for `NodeIsAlternateForRequest`.
// Walks the table's envelope vector in ordinal order; unknown ordinals are
// skipped for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeIsAlternateForRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table's inline part is a vector header; a null vector is invalid here.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `node_ref` (event handle envelope).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Members of 4 bytes or fewer are stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify the envelope header's byte/handle counts against what was consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21081
21082 impl NodeSetWeakOkRequest {
21083 #[inline(always)]
21084 fn max_ordinal_present(&self) -> u64 {
21085 if let Some(_) = self.for_child_nodes_also {
21086 return 1;
21087 }
21088 0
21089 }
21090 }
21091
// Resource type, so encoding borrows the value mutably; `take_or_borrow`
// is a pass-through of the mutable reference.
impl fidl::encoding::ResourceTypeMarker for NodeSetWeakOkRequest {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
21100
// Wire-format metadata: the inline part of a table is a 16-byte vector
// header (count + presence word), aligned to 8 bytes.
unsafe impl fidl::encoding::TypeMarker for NodeSetWeakOkRequest {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21114
// Machine-generated FIDL table encoder for `NodeSetWeakOkRequest`.
// Writes the vector header, then one 8-byte envelope per ordinal up to the
// highest field that is present.
unsafe impl
    fidl::encoding::Encode<NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut NodeSetWeakOkRequest
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeSetWeakOkRequest>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `for_child_nodes_also` (bool, borrowed by value).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            bool,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.for_child_nodes_also
                .as_ref()
                .map(<bool as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21175
// Machine-generated FIDL table decoder for `NodeSetWeakOkRequest`.
// Walks the table's envelope vector in ordinal order; unknown ordinals are
// skipped for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeSetWeakOkRequest
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table's inline part is a vector header; a null vector is invalid here.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `for_child_nodes_also` (bool, stored inline in the envelope).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <bool as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // Members of 4 bytes or fewer are stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.for_child_nodes_also.get_or_insert_with(|| {
                fidl::new_empty!(bool, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                bool,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope header's byte/handle counts against what was consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21273
21274 impl NodeGetNodeRefResponse {
21275 #[inline(always)]
21276 fn max_ordinal_present(&self) -> u64 {
21277 if let Some(_) = self.node_ref {
21278 return 1;
21279 }
21280 0
21281 }
21282 }
21283
// Resource type, so encoding borrows the value mutably; `take_or_borrow`
// is a pass-through of the mutable reference.
impl fidl::encoding::ResourceTypeMarker for NodeGetNodeRefResponse {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
21292
// Wire-format metadata: the inline part of a table is a 16-byte vector
// header (count + presence word), aligned to 8 bytes.
unsafe impl fidl::encoding::TypeMarker for NodeGetNodeRefResponse {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21306
// Machine-generated FIDL table encoder for `NodeGetNodeRefResponse`.
// Writes the vector header, then one 8-byte envelope per ordinal up to the
// highest field that is present. Takes `&mut self` so the handle field can
// be moved out during encoding.
unsafe impl
    fidl::encoding::Encode<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > for &mut NodeGetNodeRefResponse
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<NodeGetNodeRefResponse>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `node_ref` (event handle envelope).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.node_ref.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21377
// Machine-generated FIDL table decoder for `NodeGetNodeRefResponse`.
// Walks the table's envelope vector in ordinal order; unknown ordinals are
// skipped for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeGetNodeRefResponse
{
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table's inline part is a vector header; a null vector is invalid here.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `node_ref` (event handle envelope).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Event,
                { fidl::ObjectType::EVENT.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Members of 4 bytes or fewer are stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify the envelope header's byte/handle counts against what was consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21472
21473 impl VmoBuffer {
21474 #[inline(always)]
21475 fn max_ordinal_present(&self) -> u64 {
21476 if let Some(_) = self.close_weak_asap {
21477 return 3;
21478 }
21479 if let Some(_) = self.vmo_usable_start {
21480 return 2;
21481 }
21482 if let Some(_) = self.vmo {
21483 return 1;
21484 }
21485 0
21486 }
21487 }
21488
// Resource type (contains VMO/eventpair handles), so encoding borrows the
// value mutably; `take_or_borrow` is a pass-through of the mutable reference.
impl fidl::encoding::ResourceTypeMarker for VmoBuffer {
    type Borrowed<'a> = &'a mut Self;
    fn take_or_borrow<'a>(
        value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
    ) -> Self::Borrowed<'a> {
        value
    }
}
21497
// Wire-format metadata: the inline part of a table is a 16-byte vector
// header (count + presence word), aligned to 8 bytes.
unsafe impl fidl::encoding::TypeMarker for VmoBuffer {
    type Owned = Self;

    #[inline(always)]
    fn inline_align(_context: fidl::encoding::Context) -> usize {
        8
    }

    #[inline(always)]
    fn inline_size(_context: fidl::encoding::Context) -> usize {
        16
    }
}
21511
// Machine-generated FIDL table encoder for `VmoBuffer`.
// Writes the vector header, then one 8-byte envelope per ordinal up to the
// highest field that is present (1: vmo, 2: vmo_usable_start, 3: close_weak_asap).
// Takes `&mut self` so handle fields can be moved out during encoding.
unsafe impl fidl::encoding::Encode<VmoBuffer, fidl::encoding::DefaultFuchsiaResourceDialect>
    for &mut VmoBuffer
{
    unsafe fn encode(
        self,
        encoder: &mut fidl::encoding::Encoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        encoder.debug_check_bounds::<VmoBuffer>(offset);
        // Vector header
        let max_ordinal: u64 = self.max_ordinal_present();
        encoder.write_num(max_ordinal, offset);
        encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
        // Calling encoder.out_of_line_offset(0) is not allowed.
        if max_ordinal == 0 {
            return Ok(());
        }
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = max_ordinal as usize * envelope_size;
        #[allow(unused_variables)]
        let offset = encoder.out_of_line_offset(bytes_len);
        let mut _prev_end_offset: usize = 0;
        if 1 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (1 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 1: `vmo` (VMO handle envelope).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 2 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (2 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 2: `vmo_usable_start` (u64, borrowed by value).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            u64,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.vmo_usable_start
                .as_ref()
                .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;
        if 3 > max_ordinal {
            return Ok(());
        }

        // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
        // are envelope_size bytes.
        let cur_offset: usize = (3 - 1) * envelope_size;

        // Zero reserved fields.
        encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

        // Ordinal 3: `close_weak_asap` (eventpair handle envelope).
        // Safety:
        // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
        // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
        // envelope_size bytes, there is always sufficient room.
        fidl::encoding::encode_in_envelope_optional::<
            fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            >,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >(
            self.close_weak_asap.as_mut().map(
                <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
            ),
            encoder,
            offset + cur_offset,
            depth,
        )?;

        _prev_end_offset = cur_offset + envelope_size;

        Ok(())
    }
}
21643
// Machine-generated FIDL table decoder for `VmoBuffer`.
// Walks the table's envelope vector in ordinal order (1: vmo,
// 2: vmo_usable_start, 3: close_weak_asap); unknown ordinals are skipped
// for forward compatibility.
impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {
    #[inline(always)]
    fn new_empty() -> Self {
        Self::default()
    }

    unsafe fn decode(
        &mut self,
        decoder: &mut fidl::encoding::Decoder<
            '_,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        >,
        offset: usize,
        mut depth: fidl::encoding::Depth,
    ) -> fidl::Result<()> {
        decoder.debug_check_bounds::<Self>(offset);
        // The table's inline part is a vector header; a null vector is invalid here.
        let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
            None => return Err(fidl::Error::NotNullable),
            Some(len) => len,
        };
        // Calling decoder.out_of_line_offset(0) is not allowed.
        if len == 0 {
            return Ok(());
        };
        depth.increment()?;
        let envelope_size = 8;
        let bytes_len = len * envelope_size;
        let offset = decoder.out_of_line_offset(bytes_len)?;
        // Decode the envelope for each type.
        let mut _next_ordinal_to_read = 0;
        let mut next_offset = offset;
        let end_offset = offset + bytes_len;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 1 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 1: `vmo` (VMO handle envelope).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::Vmo,
                { fidl::ObjectType::VMO.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Members of 4 bytes or fewer are stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify the envelope header's byte/handle counts against what was consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 2 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 2: `vmo_usable_start` (u64).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size =
                <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
            // Members of 4 bytes or fewer are stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref = self.vmo_usable_start.get_or_insert_with(|| {
                fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
            });
            fidl::decode!(
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                val_ref,
                decoder,
                inner_offset,
                inner_depth
            )?;
            // Verify the envelope header's byte/handle counts against what was consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;
        _next_ordinal_to_read += 1;
        if next_offset >= end_offset {
            return Ok(());
        }

        // Decode unknown envelopes for gaps in ordinals.
        while _next_ordinal_to_read < 3 {
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            _next_ordinal_to_read += 1;
            next_offset += envelope_size;
        }

        // Ordinal 3: `close_weak_asap` (eventpair handle envelope).
        let next_out_of_line = decoder.next_out_of_line();
        let handles_before = decoder.remaining_handles();
        if let Some((inlined, num_bytes, num_handles)) =
            fidl::encoding::decode_envelope_header(decoder, next_offset)?
        {
            let member_inline_size = <fidl::encoding::HandleType<
                fidl::EventPair,
                { fidl::ObjectType::EVENTPAIR.into_raw() },
                2147483648,
            > as fidl::encoding::TypeMarker>::inline_size(
                decoder.context
            );
            // Members of 4 bytes or fewer are stored inline in the envelope.
            if inlined != (member_inline_size <= 4) {
                return Err(fidl::Error::InvalidInlineBitInEnvelope);
            }
            let inner_offset;
            let mut inner_depth = depth.clone();
            if inlined {
                decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                inner_offset = next_offset;
            } else {
                inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                inner_depth.increment()?;
            }
            let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
            fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
            // Verify the envelope header's byte/handle counts against what was consumed.
            if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
            {
                return Err(fidl::Error::InvalidNumBytesInEnvelope);
            }
            if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                return Err(fidl::Error::InvalidNumHandlesInEnvelope);
            }
        }

        next_offset += envelope_size;

        // Decode the remaining unknown envelopes.
        while next_offset < end_offset {
            _next_ordinal_to_read += 1;
            fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
            next_offset += envelope_size;
        }

        Ok(())
    }
}
21837}