fidl_fuchsia_sysmem2/
fidl_fuchsia_sysmem2.rs

1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_sysmem2_common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request type for `fuchsia.sysmem2/Allocator.AllocateNonSharedCollection`.
///
/// All fields are `Option` at the type level; unset fields are `None`.
/// `__source_breaking` marks the type as extensible — construct with
/// `..Default::default()` (field-exhaustive literals are source-breaking).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateNonSharedCollectionRequest {
    // Server end of the BufferCollection channel to be allocated/served.
    pub collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl: opts this resource type into standalone encoding under
// the default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateNonSharedCollectionRequest
{
}
25
/// Request type for `fuchsia.sysmem2/Allocator.AllocateSharedCollection`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorAllocateSharedCollectionRequest {
    // Server end of the root BufferCollectionToken channel to be served.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorAllocateSharedCollectionRequest
{
}
37
/// Request type for `fuchsia.sysmem2/Allocator.BindSharedCollection`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorBindSharedCollectionRequest {
    // Client end of the token being "turned in" for a BufferCollection.
    pub token: Option<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
    // Server end of the BufferCollection channel received in exchange.
    pub buffer_collection_request: Option<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorBindSharedCollectionRequest
{
}
50
/// Request type for `fuchsia.sysmem2/Allocator.GetVmoInfo`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoRequest {
    /// `vmo` is required to be set; ownership is transferred to the server
    /// so in most cases a client will duplicate a handle and transfer the
    /// duplicate via this field.
    pub vmo: Option<fidl::Vmo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoRequest
{
}
65
/// Success response type for `fuchsia.sysmem2/Allocator.GetVmoInfo`.
///
/// Extensible table-style payload; fields may be absent (`None`).
#[derive(Debug, Default, PartialEq)]
pub struct AllocatorGetVmoInfoResponse {
    // ID of the logical buffer collection that owns the queried VMO.
    pub buffer_collection_id: Option<u64>,
    // Index of the buffer within that collection.
    pub buffer_index: Option<u64>,
    // Set iff the queried VMO is a weak sysmem VMO; peer-closed signals that
    // all weak handles should be closed asap (see protocol docs).
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for AllocatorGetVmoInfoResponse
{
}
79
/// Request type for `fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachLifetimeTrackingRequest {
    // Eventpair end whose peer is signalled per the lifetime-tracking
    // contract of the protocol.
    pub server_end: Option<fidl::EventPair>,
    // Buffer-count threshold associated with the tracking — see the FIDL
    // protocol docs for exact semantics (not visible in this file).
    pub buffers_remaining: Option<u32>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachLifetimeTrackingRequest
{
}
92
/// Request type for `fuchsia.sysmem2/BufferCollection.AttachToken`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionAttachTokenRequest {
    // Rights to strip from the attached token's VMO handles.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    // Server end of the newly attached BufferCollectionToken channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionAttachTokenRequest
{
}
105
/// Information about a buffer collection and its buffers.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionInfo {
    /// These settings apply to all the buffers in the initial buffer
    /// allocation.
    ///
    /// This field will always be set by sysmem.
    pub settings: Option<SingleBufferSettings>,
    /// VMO handles (and vmo_usable_start offset) for each buffer in the
    /// collection.
    ///
    /// The size of this vector is the buffer_count (buffer_count is not sent
    /// separately).
    ///
    /// All buffer VMO handles have identical size and access rights.  The size
    /// is in settings.buffer_settings.size_bytes.
    ///
    /// The VMO access rights are determined based on the usages which the
    /// client specified when allocating the buffer collection.  For example, a
    /// client which expressed a read-only usage will receive VMOs without write
    /// rights.  In addition, the rights can be attenuated by the parameter to
    /// BufferCollectionToken.Duplicate() calls.
    ///
    /// This field will always have VmoBuffer(s) in it, even if the participant
    /// specifies usage which does not require VMO handles.  This permits such a
    /// participant to know the vmo_usable_start values, in case that's of any
    /// use to the participant.
    ///
    /// This field will always be set by sysmem, even if the participant doesn't
    /// specify any buffer usage (but the [`fuchsia.sysmem2/VmoBuffer.vmo`]
    /// sub-field within this field won't be set in that case).
    pub buffers: Option<Vec<VmoBuffer>>,
    /// This number is unique among all logical buffer collections per boot.
    ///
    /// This ID number will be the same for all BufferCollectionToken(s),
    /// BufferCollection(s), and BufferCollectionTokenGroup(s) associated with
    /// the same logical buffer collection (derived from the same root token
    /// created with fuchsia.sysmem2.Allocator.CreateSharedCollection, or with
    /// CreateNonSharedCollection).
    ///
    /// The same ID can be retrieved from a BufferCollectionToken,
    /// BufferCollection, or BufferCollectionTokenGroup using
    /// GetBufferCollectionId (at the cost of a round-trip to sysmem and back).
    ///
    /// This field will always be set by sysmem.
    pub buffer_collection_id: Option<u64>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for BufferCollectionInfo {}
157
/// Request type for `fuchsia.sysmem2/BufferCollection.SetConstraints`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionSetConstraintsRequest {
    // This participant's buffer constraints for the logical collection.
    pub constraints: Option<BufferCollectionConstraints>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionSetConstraintsRequest
{
}
169
/// Request type for
/// `fuchsia.sysmem2/BufferCollectionToken.CreateBufferCollectionTokenGroup`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
    // Server end of the new BufferCollectionTokenGroup channel.
    pub group_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
{
}
181
/// Request type for `fuchsia.sysmem2/BufferCollectionToken.Duplicate`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateRequest {
    // Rights to strip from VMO handles later delivered via the duplicate.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    // Server end of the duplicate BufferCollectionToken channel.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateRequest
{
}
194
/// Request type for `fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildRequest {
    /// Must be set.
    pub token_request: Option<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
    /// If not set, the default is `ZX_RIGHT_SAME_RIGHTS`.
    pub rights_attenuation_mask: Option<fidl::Rights>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildRequest
{
}
209
/// Response type for
/// `fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`.
///
/// Extensible table-style payload; fields may be absent (`None`).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponse {
    // Client ends of the newly created child tokens.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenGroupCreateChildrenSyncResponse
{
}
221
/// Response type for `fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`.
///
/// Extensible table-style payload; fields may be absent (`None`).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionTokenDuplicateSyncResponse {
    // Client ends of the duplicated tokens, one per requested duplicate.
    pub tokens: Option<Vec<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionTokenDuplicateSyncResponse
{
}
233
/// Success response type for
/// `fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`.
///
/// Extensible table-style payload; fields may be absent (`None`).
#[derive(Debug, Default, PartialEq)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponse {
    // Settings and VMOs for the allocated collection.
    pub buffer_collection_info: Option<BufferCollectionInfo>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for BufferCollectionWaitForAllBuffersAllocatedResponse
{
}
245
/// Request type for `fuchsia.sysmem2/Node.AttachNodeTracking`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeAttachNodeTrackingRequest {
    /// This field must be set. This eventpair end will be closed after the
    /// `Node` is closed or failed and the node's buffer counts are no
    /// longer in effect in the logical buffer collection.
    pub server_end: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for NodeAttachNodeTrackingRequest
{
}
260
/// Request type for `fuchsia.sysmem2/Node.IsAlternateFor`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeIsAlternateForRequest {
    // Node reference (event handle) of the other node being compared,
    // presumably obtained via Node.GetNodeRef — confirm against protocol docs.
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeIsAlternateForRequest {}
269
/// Request type for `fuchsia.sysmem2/Node.SetWeakOk`.
///
/// Extensible table-style payload; construct with `..Default::default()`.
#[derive(Debug, Default, PartialEq)]
pub struct NodeSetWeakOkRequest {
    // When true, the weak-ok setting also applies to this node's child nodes.
    pub for_child_nodes_also: Option<bool>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeSetWeakOkRequest {}
278
/// Response type for `fuchsia.sysmem2/Node.GetNodeRef`.
///
/// Extensible table-style payload; fields may be absent (`None`).
#[derive(Debug, Default, PartialEq)]
pub struct NodeGetNodeRefResponse {
    // Event handle identifying this node; usable with Node.IsAlternateFor.
    pub node_ref: Option<fidl::Event>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for NodeGetNodeRefResponse {}
287
/// A single buffer within a [`BufferCollectionInfo.buffers`] vector: the VMO
/// handle (possibly unset) plus per-buffer metadata.
#[derive(Debug, Default, PartialEq)]
pub struct VmoBuffer {
    /// `vmo` can be un-set if a participant has only
    /// [`fuchsia.sysmem2/BufferUsage.none`] set to `NONE_USAGE` (explicitly or
    /// implicitly by [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    /// without `constraints` set).
    pub vmo: Option<fidl::Vmo>,
    /// Offset within the VMO of the first usable byte. Must be < the VMO's size
    /// in bytes, and leave sufficient room for BufferMemorySettings.size_bytes
    /// before the end of the VMO.
    ///
    /// Currently sysmem will always set this field to 0, and in future, sysmem
    /// won't set this field to a non-zero value unless all participants have
    /// explicitly indicated support for non-zero vmo_usable_start (this
    /// mechanism does not exist as of this comment). A participant that hasn't
    /// explicitly indicated support for non-zero vmo_usable_start (all current
    /// clients) should implicitly assume this field is set to 0 without
    /// actually checking this field.
    pub vmo_usable_start: Option<u64>,
    /// This field is set iff `vmo` is a sysmem weak VMO handle. The client must
    /// keep `close_weak_asap` around for as long as `vmo`, and must notice
    /// `ZX_EVENTPAIR_PEER_CLOSED`. If that signal occurs, the client must close
    /// `vmo` asap. Not doing so is considered a VMO leak by the client and in
    /// that case sysmem will eventually complain loudly via syslog (currently
    /// 5s later).
    pub close_weak_asap: Option<fidl::EventPair>,
    #[doc(hidden)]
    pub __source_breaking: fidl::marker::SourceBreaking,
}

// Empty marker impl enabling standalone encoding in the default dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {}
319
/// Zero-sized marker type identifying the `fuchsia.sysmem2.Allocator`
/// protocol; ties together its proxy, stream, and synchronous-proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AllocatorMarker;

impl fidl::endpoints::ProtocolMarker for AllocatorMarker {
    type Proxy = AllocatorProxy;
    type RequestStream = AllocatorRequestStream;
    // Synchronous proxies only exist on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = AllocatorSynchronousProxy;

    // Name used for debug/error messages and service discovery.
    const DEBUG_NAME: &'static str = "fuchsia.sysmem2.Allocator";
}
// Marks the protocol as discoverable (connectable by name).
impl fidl::endpoints::DiscoverableProtocolMarker for AllocatorMarker {}
/// Result alias for the `Allocator.GetVmoInfo` method.
pub type AllocatorGetVmoInfoResult = Result<AllocatorGetVmoInfoResponse, Error>;
333
/// Client-side interface for `fuchsia.sysmem2.Allocator`; implemented by the
/// async proxy. One-way methods return `Result<(), fidl::Error>` immediately;
/// two-way methods return a future via the associated `…ResponseFut` type.
pub trait AllocatorProxyInterface: Send + Sync {
    fn r#allocate_non_shared_collection(
        &self,
        payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#allocate_shared_collection(
        &self,
        payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    fn r#bind_shared_collection(
        &self,
        payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error>;
    // Future returned by `validate_buffer_collection_token`.
    type ValidateBufferCollectionTokenResponseFut: std::future::Future<
            Output = Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error>,
        > + Send;
    fn r#validate_buffer_collection_token(
        &self,
        payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut;
    fn r#set_debug_client_info(
        &self,
        payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    // Future returned by `get_vmo_info`.
    type GetVmoInfoResponseFut: std::future::Future<Output = Result<AllocatorGetVmoInfoResult, fidl::Error>>
        + Send;
    fn r#get_vmo_info(&self, payload: AllocatorGetVmoInfoRequest) -> Self::GetVmoInfoResponseFut;
}
/// Synchronous (blocking) client proxy for `fuchsia.sysmem2.Allocator`.
/// Only available on Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct AllocatorSynchronousProxy {
    // Underlying synchronous FIDL client bound to the protocol channel.
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for AllocatorSynchronousProxy {
    type Proxy = AllocatorProxy;
    type Protocol = AllocatorMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
385
#[cfg(target_os = "fuchsia")]
impl AllocatorSynchronousProxy {
    /// Creates a synchronous proxy over `channel`, tagging the client with
    /// the protocol's `DEBUG_NAME` for diagnostics.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorEvent, fidl::Error> {
        AllocatorEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) send; the hex constant is the generated
        // method ordinal for this protocol method.
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2/BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way send; hex constant is the generated method ordinal.
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // One-way send; hex constant is the generated method ordinal.
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
        // Two-way call: blocks until the response arrives or ___deadline
        // passes; hex constant is the generated method ordinal.
        let _response = self.client.send_query::<
            AllocatorValidateBufferCollectionTokenRequest,
            fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
        Ok(_response)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way send; hex constant is the generated method ordinal.
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
        // Two-way call with a domain-error result type; hex constant is the
        // generated method ordinal.
        let _response = self.client.send_query::<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
        >(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<AllocatorMarker>("get_vmo_info")?;
        Ok(_response.map(|x| x))
    }
}
671
// Converts the proxy into its raw Zircon handle (via the underlying channel).
#[cfg(target_os = "fuchsia")]
impl From<AllocatorSynchronousProxy> for zx::Handle {
    fn from(value: AllocatorSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

// Wraps an existing channel in a synchronous proxy.
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for AllocatorSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}
685
/// Asynchronous client proxy for `fuchsia.sysmem2.Allocator`.
#[derive(Debug, Clone)]
pub struct AllocatorProxy {
    // Underlying async FIDL client bound to the protocol channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for AllocatorProxy {
    type Protocol = AllocatorMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Fails (returning Self) if the client still has outstanding references
    // that prevent reclaiming the channel.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
706
impl AllocatorProxy {
    /// Create a new Proxy for fuchsia.sysmem2/Allocator.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        // The protocol's debug name is attached to the client for diagnostics.
        let protocol_name = <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> AllocatorEventStream {
        AllocatorEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    pub fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        // Delegates to the `AllocatorProxyInterface` implementation on this type.
        AllocatorProxyInterface::r#allocate_non_shared_collection(self, payload)
    }

    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    pub fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#allocate_shared_collection(self, payload)
    }

    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    pub fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#bind_shared_collection(self, payload)
    }

    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    pub fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        // Two-way call: returns a future that resolves with the response.
        AllocatorProxyInterface::r#validate_buffer_collection_token(self, payload)
    }

    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        AllocatorProxyInterface::r#set_debug_client_info(self, payload)
    }

    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    pub fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        AllocatorProxyInterface::r#get_vmo_info(self, payload)
    }
}
956
impl AllocatorProxyInterface for AllocatorProxy {
    // One-way method: encodes and sends the request with no reply expected.
    // 0x5ca681f025a80e44 is the AllocateNonSharedCollection method ordinal.
    fn r#allocate_non_shared_collection(
        &self,
        mut payload: AllocatorAllocateNonSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateNonSharedCollectionRequest>(
            &mut payload,
            0x5ca681f025a80e44,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way method; ordinal 0x11a19ff51f0b49c1 (AllocateSharedCollection).
    fn r#allocate_shared_collection(
        &self,
        mut payload: AllocatorAllocateSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorAllocateSharedCollectionRequest>(
            &mut payload,
            0x11a19ff51f0b49c1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way method; ordinal 0x550916b0dc1d5b4e (BindSharedCollection).
    fn r#bind_shared_collection(
        &self,
        mut payload: AllocatorBindSharedCollectionRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorBindSharedCollectionRequest>(
            &mut payload,
            0x550916b0dc1d5b4e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type ValidateBufferCollectionTokenResponseFut = fidl::client::QueryResponseFut<
        AllocatorValidateBufferCollectionTokenResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way method; ordinal 0x4c5ee91b02a7e68d (ValidateBufferCollectionToken).
    fn r#validate_buffer_collection_token(
        &self,
        mut payload: &AllocatorValidateBufferCollectionTokenRequest,
    ) -> Self::ValidateBufferCollectionTokenResponseFut {
        // Decodes the reply body once it arrives; the ordinal must match the
        // one used for the request.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorValidateBufferCollectionTokenResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<AllocatorValidateBufferCollectionTokenResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x4c5ee91b02a7e68d,
            >(_buf?)?
            .into_result::<AllocatorMarker>("validate_buffer_collection_token")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            AllocatorValidateBufferCollectionTokenRequest,
            AllocatorValidateBufferCollectionTokenResponse,
        >(
            payload,
            0x4c5ee91b02a7e68d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way method; ordinal 0x6f68f19a3f509c4d (SetDebugClientInfo).
    fn r#set_debug_client_info(
        &self,
        mut payload: &AllocatorSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<AllocatorSetDebugClientInfoRequest>(
            payload,
            0x6f68f19a3f509c4d,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetVmoInfoResponseFut = fidl::client::QueryResponseFut<
        AllocatorGetVmoInfoResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way method; ordinal 0x21a881120aa0ddf9 (GetVmoInfo).
    fn r#get_vmo_info(
        &self,
        mut payload: AllocatorGetVmoInfoRequest,
    ) -> Self::GetVmoInfoResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<AllocatorGetVmoInfoResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<AllocatorGetVmoInfoResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x21a881120aa0ddf9,
            >(_buf?)?
            .into_result::<AllocatorMarker>("get_vmo_info")?;
            // Identity map into the public result type.
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResult>(
            &mut payload,
            0x21a881120aa0ddf9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
}
1059
/// A stream of events arriving from the remote end of a
/// fuchsia.sysmem2/Allocator channel (see `AllocatorProxy::take_event_stream`).
pub struct AllocatorEventStream {
    // Receives raw event message buffers from the client.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
1063
// Explicitly mark the event stream as `Unpin` so it can be polled without pinning.
impl std::marker::Unpin for AllocatorEventStream {}
1065
impl futures::stream::FusedStream for AllocatorEventStream {
    /// Reports termination of the underlying event receiver; once true, the
    /// stream yields no further events.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
1071
1072impl futures::Stream for AllocatorEventStream {
1073    type Item = Result<AllocatorEvent, fidl::Error>;
1074
1075    fn poll_next(
1076        mut self: std::pin::Pin<&mut Self>,
1077        cx: &mut std::task::Context<'_>,
1078    ) -> std::task::Poll<Option<Self::Item>> {
1079        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
1080            &mut self.event_receiver,
1081            cx
1082        )?) {
1083            Some(buf) => std::task::Poll::Ready(Some(AllocatorEvent::decode(buf))),
1084            None => std::task::Poll::Ready(None),
1085        }
1086    }
1087}
1088
/// An event received on a fuchsia.sysmem2/Allocator channel.
///
/// The decoder only produces the catch-all variant for flexible events with
/// unrecognized ordinals (see `AllocatorEvent::decode`).
#[derive(Debug)]
pub enum AllocatorEvent {
    /// A flexible event whose ordinal this client does not recognize.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
1097
impl AllocatorEvent {
    /// Decodes a message buffer as a [`AllocatorEvent`].
    ///
    /// Strict (non-flexible) messages with unrecognized ordinals produce
    /// `fidl::Error::UnknownOrdinal`.
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<AllocatorEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events are unsolicited, so they carry a zero transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // Flexible events with unknown ordinals are surfaced to the client
            // rather than treated as protocol errors.
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(AllocatorEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
1117
/// A Stream of incoming requests for fuchsia.sysmem2/Allocator.
pub struct AllocatorRequestStream {
    // Shared server state; also cloned into control handles and responders.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel shuts down or the peer closes; polling after that
    // panics (see `poll_next`).
    is_terminated: bool,
}
1123
// Explicitly mark the request stream as `Unpin` so it can be polled without pinning.
impl std::marker::Unpin for AllocatorRequestStream {}
1125
impl futures::stream::FusedStream for AllocatorRequestStream {
    /// Once terminated (shutdown or peer closed), the stream yields no more
    /// requests.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
1131
1132impl fidl::endpoints::RequestStream for AllocatorRequestStream {
1133    type Protocol = AllocatorMarker;
1134    type ControlHandle = AllocatorControlHandle;
1135
1136    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
1137        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
1138    }
1139
1140    fn control_handle(&self) -> Self::ControlHandle {
1141        AllocatorControlHandle { inner: self.inner.clone() }
1142    }
1143
1144    fn into_inner(
1145        self,
1146    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
1147    {
1148        (self.inner, self.is_terminated)
1149    }
1150
1151    fn from_inner(
1152        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
1153        is_terminated: bool,
1154    ) -> Self {
1155        Self { inner, is_terminated }
1156    }
1157}
1158
impl futures::Stream for AllocatorRequestStream {
    type Item = Result<AllocatorRequest, fidl::Error>;

    /// Reads the next message from the channel, validates its header, and
    /// dispatches on the method ordinal to produce a typed
    /// [`AllocatorRequest`]. Unknown flexible methods are surfaced as
    /// `_UnknownMethod`; unknown strict methods are an error.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a requested shutdown before attempting another read.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling past completion is a caller bug.
        if this.is_terminated {
            panic!("polled AllocatorRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    // Any other read error is surfaced to the caller.
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal from the transaction header.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // AllocateNonSharedCollection (one-way)
                    0x5ca681f025a80e44 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateNonSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateNonSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateNonSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // AllocateSharedCollection (one-way)
                    0x11a19ff51f0b49c1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorAllocateSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorAllocateSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::AllocateSharedCollection {
                            payload: req,
                            control_handle,
                        })
                    }
                    // BindSharedCollection (one-way)
                    0x550916b0dc1d5b4e => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorBindSharedCollectionRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorBindSharedCollectionRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::BindSharedCollection { payload: req, control_handle })
                    }
                    // ValidateBufferCollectionToken (two-way: carries a responder)
                    0x4c5ee91b02a7e68d => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorValidateBufferCollectionTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorValidateBufferCollectionTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::ValidateBufferCollectionToken {
                            payload: req,
                            responder: AllocatorValidateBufferCollectionTokenResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetDebugClientInfo (one-way)
                    0x6f68f19a3f509c4d => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::SetDebugClientInfo { payload: req, control_handle })
                    }
                    // GetVmoInfo (two-way: carries a responder)
                    0x21a881120aa0ddf9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            AllocatorGetVmoInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<AllocatorGetVmoInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = AllocatorControlHandle { inner: this.inner.clone() };
                        Ok(AllocatorRequest::GetVmoInfo {
                            payload: req,
                            responder: AllocatorGetVmoInfoResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method (tx_id == 0): report it
                    // to the server implementation without replying.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: send the UnknownMethod
                    // framework error back to the caller, then report it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(AllocatorRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: AllocatorControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: a protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <AllocatorMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
1310
/// Allocates system memory buffers.
///
/// Epitaphs are not used in this protocol.
#[derive(Debug)]
pub enum AllocatorRequest {
    /// Allocates a buffer collection on behalf of a single client (aka
    /// initiator) who is also the only participant (from the point of view of
    /// sysmem).
    ///
    /// This call exists mainly for temp/testing purposes.  This call skips the
    /// [`fuchsia.sysmem2/BufferCollectionToken`] stage, so there's no way to
    /// allow another participant to specify its constraints.
    ///
    /// Real clients are encouraged to use
    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] instead, and to
    /// let relevant participants directly convey their own constraints to
    /// sysmem by sending `BufferCollectionToken`s to those participants.
    ///
    /// + request `collection_request` The server end of the
    ///   [`fuchsia.sysmem2/BufferCollection`].
    AllocateNonSharedCollection {
        payload: AllocatorAllocateNonSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Creates a root [`fuchsia.sysmem2/BufferCollectionToken`].
    ///
    /// The `BufferCollectionToken` can be "duplicated" for distribution to
    /// participants by using
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`]. Each
    /// `BufferCollectionToken` can be converted into a
    /// [`fuchsia.sysmem2.BufferCollection`] using
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
    ///
    /// Buffer constraints can be set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// Success/failure to populate the buffer collection with buffers can be
    /// determined from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// Closing the client end of a `BufferCollectionToken` or
    /// `BufferCollection` (without `Release` first) will fail all client ends
    /// in the same failure domain, which by default is all client ends of the
    /// buffer collection. See
    /// [`fuchsia.sysmem2/BufferCollection.SetDispensable`] and
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`] for ways to create
    /// separate failure domains within a buffer collection.
    AllocateSharedCollection {
        payload: AllocatorAllocateSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Convert a [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// At the time of sending this message, the buffer collection hasn't yet
    /// been populated with buffers - the participant must first also send
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] via the
    /// `BufferCollection` client end.
    ///
    /// All `BufferCollectionToken`(s) duplicated from a root
    /// `BufferCollectionToken` (created via `AllocateSharedCollection`) must be
    /// "turned in" via `BindSharedCollection` (or `Release`ed), and all
    /// existing `BufferCollection` client ends must have sent `SetConstraints`
    /// before the logical BufferCollection will be populated with buffers (or
    /// will fail if the overall set of constraints can't be satisfied).
    ///
    /// + request `token` The client endpoint of a channel whose server end was
    ///   sent to sysmem using
    ///   [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`] or whose server
    ///   end was sent to sysmem using
    ///   [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`].  The token is
    ///   being "turned in" in exchange for a
    ///   [`fuchsia.sysmem2/BufferCollection`].
    /// + request `buffer_collection_request` The server end of a
    ///   [`fuchsia.sysmem2/BufferCollection`] channel.  The sender retains the
    ///   client end. The `BufferCollection` channel is a single participant's
    ///   connection to the logical buffer collection. Typically there will be
    ///   other participants with their own `BufferCollection` channel to the
    ///   logical buffer collection.
    BindSharedCollection {
        payload: AllocatorBindSharedCollectionRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Checks whether a [`fuchsia.sysmem2/BufferCollectionToken`] is known to
    /// the sysmem server.
    ///
    /// With this call, the client can determine whether an incoming token is a
    /// real sysmem token that is known to the sysmem server, without any risk
    /// of getting stuck waiting forever on a potentially fake token to complete
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] (or any other two-way
    /// FIDL message). In cases where the client trusts the source of the token
    /// to provide a real token, this call is not typically needed outside of
    /// debugging.
    ///
    /// If the validate fails sometimes but succeeds other times, the source of
    /// the token may itself not be calling
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] or
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after creating/duplicating the
    /// token but before sending the token to the current client. It may be more
    /// convenient for the source to use
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] to duplicate
    /// token(s), since that call has the sync step built in. Or, the buffer
    /// collection may be failing before this call is processed by the sysmem
    /// server, as buffer collection failure cleans up sysmem's tracking of
    /// associated tokens.
    ///
    /// This call has no effect on any token.
    ///
    /// + request `token_server_koid` The koid of the server end of a channel
    ///   that might be a BufferCollectionToken channel.  This can be obtained
    ///   via `zx_object_get_info` `ZX_INFO_HANDLE_BASIC` `related_koid`.
    /// - response `is_known` true means sysmem knew of the token at the time
    ///   sysmem processed the request, but doesn't guarantee that the token is
    ///   still valid by the time the client receives the reply. What it does
    ///   guarantee is that the token at least was a real token, so a two-way
    ///   call to the token won't stall forever (will fail or succeed fairly
    ///   quickly, not stall). This can already be known implicitly if the
    ///   source of the token can be trusted to provide a real token. A false
    ///   value means the token wasn't known to sysmem at the time sysmem
    ///   processed this call, but the token may have previously been valid, or
    ///   may yet become valid. Or if the sender of the token isn't trusted to
    ///   provide a real token, the token may be fake. It's the responsibility
    ///   of the sender to sync with sysmem to ensure that previously
    ///   created/duplicated token(s) are known to sysmem, before sending the
    ///   token(s) to other participants.
    ValidateBufferCollectionToken {
        payload: AllocatorValidateBufferCollectionTokenRequest,
        responder: AllocatorValidateBufferCollectionTokenResponder,
    },
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on all [`fuchsia.sysmem2/Node`](s)
    /// subsequently created by this [`fuchsia.sysmem2/Allocator`]
    /// including any [`fuchsia.sysmem2/BufferCollection`](s) created via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] (in the absence of
    /// any prior call to [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`],
    /// these `BufferCollection`(s) have the same initial debug client info as
    /// the token turned in to create the `BufferCollection`).
    ///
    /// This info can be subsequently overridden on a per-`Node` basis by
    /// sending [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    SetDebugClientInfo {
        payload: AllocatorSetDebugClientInfoRequest,
        control_handle: AllocatorControlHandle,
    },
    /// Given a handle to a sysmem-provided VMO, this returns additional info
    /// about the corresponding sysmem logical buffer.
    ///
    /// Most callers will duplicate a VMO handle first and send the duplicate to
    /// this call.
    ///
    /// If the client has created a child VMO of a sysmem-provided VMO, that
    /// child VMO isn't considered a "sysmem VMO" for purposes of this call.
    ///
    /// + request `vmo` A handle to a sysmem-provided VMO (or see errors).
    /// - response `buffer_collection_id` The buffer collection ID, which is
    ///   unique per logical buffer collection per boot.
    /// - response `buffer_index` The buffer index of the buffer within the
    ///   buffer collection. This is the same as the index of the buffer within
    ///   [`fuchsia.sysmem2/BufferCollectionInfo.buffers`]. The `buffer_index`
    ///   is the same for all sysmem-delivered VMOs corresponding to the same
    ///   logical buffer, even if the VMO koids differ. The `buffer_index` is
    ///   only unique across buffers of a buffer collection. For a given buffer,
    ///   the combination of `buffer_collection_id` and `buffer_index` is unique
    ///   per boot.
    /// - response `close_weak_asap` Iff `vmo` is a handle to a weak sysmem VMO,
    ///   the `close_weak_asap` field will be set in the response. This handle
    ///   will signal `ZX_EVENTPAIR_PEER_CLOSED` when all weak VMO handles to
    ///   the buffer should be closed as soon as possible. This is signalled
    ///   shortly after all strong sysmem VMOs to the buffer are closed
    ///   (including any held indirectly via strong `BufferCollectionToken` or
    ///   strong `BufferCollection`). Failure to close all weak sysmem VMO
    ///   handles to the buffer quickly upon `ZX_EVENTPAIR_PEER_CLOSED` is
    ///   considered a VMO leak caused by the client still holding a weak sysmem
    ///   VMO handle and results in loud complaints to the log by sysmem. The
    ///   buffers of a collection can be freed independently of each other. The
    ///   `ZX_EVENTPAIR_PEER_CLOSED` may already be signalled before the
    ///   response arrives at the client. A client that isn't prepared to handle
    ///   weak sysmem VMOs, on seeing this field set, can close all handles to
    ///   the buffer and fail any associated request.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` - the vmo isn't a sysmem
    ///   VMO. Both strong and weak sysmem VMOs can be passed to this call, and
    ///   the VMO handle passed in to this call itself keeps the VMO's info
    ///   alive for purposes of responding to this call. Because of this,
    ///   ZX_ERR_NOT_FOUND errors are unambiguous (even if there are no other
    ///   handles to the VMO when calling; even if other handles are closed
    ///   before the GetVmoInfo response arrives at the client).
    /// * error `[fuchsia.sysmem2/Error.HANDLE_ACCESS_DENIED]` The vmo isn't
    ///   capable of being used with GetVmoInfo due to rights/capability
    ///   attenuation. The VMO needs to be usable with [`zx_vmo_get_info`] with
    ///   topic [`ZX_INFO_HANDLE_BASIC`].
    /// * error `[fuchsia.sysmem2/Error.UNSPECIFIED]` The request failed for an
    ///   unspecified reason. See the log for more info.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The vmo field
    ///   wasn't set, or there was some other problem with the request field(s).
    GetVmoInfo { payload: AllocatorGetVmoInfoRequest, responder: AllocatorGetVmoInfoResponder },
    /// An interaction was received which does not match any known method.
    #[non_exhaustive]
    _UnknownMethod {
        /// Ordinal of the method that was called.
        ordinal: u64,
        control_handle: AllocatorControlHandle,
        method_type: fidl::MethodType,
    },
}
1531
1532impl AllocatorRequest {
1533    #[allow(irrefutable_let_patterns)]
1534    pub fn into_allocate_non_shared_collection(
1535        self,
1536    ) -> Option<(AllocatorAllocateNonSharedCollectionRequest, AllocatorControlHandle)> {
1537        if let AllocatorRequest::AllocateNonSharedCollection { payload, control_handle } = self {
1538            Some((payload, control_handle))
1539        } else {
1540            None
1541        }
1542    }
1543
1544    #[allow(irrefutable_let_patterns)]
1545    pub fn into_allocate_shared_collection(
1546        self,
1547    ) -> Option<(AllocatorAllocateSharedCollectionRequest, AllocatorControlHandle)> {
1548        if let AllocatorRequest::AllocateSharedCollection { payload, control_handle } = self {
1549            Some((payload, control_handle))
1550        } else {
1551            None
1552        }
1553    }
1554
1555    #[allow(irrefutable_let_patterns)]
1556    pub fn into_bind_shared_collection(
1557        self,
1558    ) -> Option<(AllocatorBindSharedCollectionRequest, AllocatorControlHandle)> {
1559        if let AllocatorRequest::BindSharedCollection { payload, control_handle } = self {
1560            Some((payload, control_handle))
1561        } else {
1562            None
1563        }
1564    }
1565
1566    #[allow(irrefutable_let_patterns)]
1567    pub fn into_validate_buffer_collection_token(
1568        self,
1569    ) -> Option<(
1570        AllocatorValidateBufferCollectionTokenRequest,
1571        AllocatorValidateBufferCollectionTokenResponder,
1572    )> {
1573        if let AllocatorRequest::ValidateBufferCollectionToken { payload, responder } = self {
1574            Some((payload, responder))
1575        } else {
1576            None
1577        }
1578    }
1579
1580    #[allow(irrefutable_let_patterns)]
1581    pub fn into_set_debug_client_info(
1582        self,
1583    ) -> Option<(AllocatorSetDebugClientInfoRequest, AllocatorControlHandle)> {
1584        if let AllocatorRequest::SetDebugClientInfo { payload, control_handle } = self {
1585            Some((payload, control_handle))
1586        } else {
1587            None
1588        }
1589    }
1590
1591    #[allow(irrefutable_let_patterns)]
1592    pub fn into_get_vmo_info(
1593        self,
1594    ) -> Option<(AllocatorGetVmoInfoRequest, AllocatorGetVmoInfoResponder)> {
1595        if let AllocatorRequest::GetVmoInfo { payload, responder } = self {
1596            Some((payload, responder))
1597        } else {
1598            None
1599        }
1600    }
1601
1602    /// Name of the method defined in FIDL
1603    pub fn method_name(&self) -> &'static str {
1604        match *self {
1605            AllocatorRequest::AllocateNonSharedCollection { .. } => {
1606                "allocate_non_shared_collection"
1607            }
1608            AllocatorRequest::AllocateSharedCollection { .. } => "allocate_shared_collection",
1609            AllocatorRequest::BindSharedCollection { .. } => "bind_shared_collection",
1610            AllocatorRequest::ValidateBufferCollectionToken { .. } => {
1611                "validate_buffer_collection_token"
1612            }
1613            AllocatorRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
1614            AllocatorRequest::GetVmoInfo { .. } => "get_vmo_info",
1615            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
1616                "unknown one-way method"
1617            }
1618            AllocatorRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
1619                "unknown two-way method"
1620            }
1621        }
1622    }
1623}
1624
/// Server-side control handle for an `Allocator` connection. Cheap to clone;
/// clones share the same underlying connection state.
#[derive(Debug, Clone)]
pub struct AllocatorControlHandle {
    // Shared connection state; provides shutdown and channel access (see the
    // `ControlHandle` impl below).
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
1629
impl fidl::endpoints::ControlHandle for AllocatorControlHandle {
    // Delegates shutdown to the shared connection state.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Shuts down, delivering `status` as the connection's epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // True once the underlying channel is closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Signals handle that resolves when the underlying channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Sets/clears signal bits on the peer end of the channel (Fuchsia only).
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
1655
// Intentionally empty: no protocol-specific methods beyond the
// `ControlHandle` trait impl above.
impl AllocatorControlHandle {}
1657
/// Responder for a pending `Allocator.ValidateBufferCollectionToken`
/// transaction; reply via `send`/`send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorValidateBufferCollectionTokenResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running this type's `Drop` (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder answers (echoed in the reply).
    tx_id: u32,
}
1664
/// Set the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorValidateBufferCollectionTokenResponder {
    fn drop(&mut self) {
        // A dropped responder means no reply will ever be sent; shut down the
        // channel so the client's pending call fails instead of hanging.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1675
impl fidl::endpoints::Responder for AllocatorValidateBufferCollectionTokenResponder {
    type ControlHandle = AllocatorControlHandle;

    // Borrows the control handle for the connection this responder belongs to.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    // Consumes the responder without replying and without shutting down the
    // channel, by skipping this type's `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1690
1691impl AllocatorValidateBufferCollectionTokenResponder {
1692    /// Sends a response to the FIDL transaction.
1693    ///
1694    /// Sets the channel to shutdown if an error occurs.
1695    pub fn send(
1696        self,
1697        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1698    ) -> Result<(), fidl::Error> {
1699        let _result = self.send_raw(payload);
1700        if _result.is_err() {
1701            self.control_handle.shutdown();
1702        }
1703        self.drop_without_shutdown();
1704        _result
1705    }
1706
1707    /// Similar to "send" but does not shutdown the channel if an error occurs.
1708    pub fn send_no_shutdown_on_err(
1709        self,
1710        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1711    ) -> Result<(), fidl::Error> {
1712        let _result = self.send_raw(payload);
1713        self.drop_without_shutdown();
1714        _result
1715    }
1716
1717    fn send_raw(
1718        &self,
1719        mut payload: &AllocatorValidateBufferCollectionTokenResponse,
1720    ) -> Result<(), fidl::Error> {
1721        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
1722            AllocatorValidateBufferCollectionTokenResponse,
1723        >>(
1724            fidl::encoding::Flexible::new(payload),
1725            self.tx_id,
1726            0x4c5ee91b02a7e68d,
1727            fidl::encoding::DynamicFlags::FLEXIBLE,
1728        )
1729    }
1730}
1731
/// Responder for a pending `Allocator.GetVmoInfo` transaction; reply via
/// `send`/`send_no_shutdown_on_err`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct AllocatorGetVmoInfoResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running this type's `Drop` (which shuts the channel down).
    control_handle: std::mem::ManuallyDrop<AllocatorControlHandle>,
    // Transaction id of the request this responder answers (echoed in the reply).
    tx_id: u32,
}
1738
/// Set the channel to be shutdown (see [`AllocatorControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for AllocatorGetVmoInfoResponder {
    fn drop(&mut self) {
        // A dropped responder means no reply will ever be sent; shut down the
        // channel so the client's pending call fails instead of hanging.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
1749
impl fidl::endpoints::Responder for AllocatorGetVmoInfoResponder {
    type ControlHandle = AllocatorControlHandle;

    // Borrows the control handle for the connection this responder belongs to.
    fn control_handle(&self) -> &AllocatorControlHandle {
        &self.control_handle
    }

    // Consumes the responder without replying and without shutting down the
    // channel, by skipping this type's `Drop` impl.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
1764
1765impl AllocatorGetVmoInfoResponder {
1766    /// Sends a response to the FIDL transaction.
1767    ///
1768    /// Sets the channel to shutdown if an error occurs.
1769    pub fn send(
1770        self,
1771        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1772    ) -> Result<(), fidl::Error> {
1773        let _result = self.send_raw(result);
1774        if _result.is_err() {
1775            self.control_handle.shutdown();
1776        }
1777        self.drop_without_shutdown();
1778        _result
1779    }
1780
1781    /// Similar to "send" but does not shutdown the channel if an error occurs.
1782    pub fn send_no_shutdown_on_err(
1783        self,
1784        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1785    ) -> Result<(), fidl::Error> {
1786        let _result = self.send_raw(result);
1787        self.drop_without_shutdown();
1788        _result
1789    }
1790
1791    fn send_raw(
1792        &self,
1793        mut result: Result<AllocatorGetVmoInfoResponse, Error>,
1794    ) -> Result<(), fidl::Error> {
1795        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
1796            AllocatorGetVmoInfoResponse,
1797            Error,
1798        >>(
1799            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
1800            self.tx_id,
1801            0x21a881120aa0ddf9,
1802            fidl::encoding::DynamicFlags::FLEXIBLE,
1803        )
1804    }
1805}
1806
/// Zero-sized marker type identifying the `BufferCollection` protocol; ties
/// together the proxy, stream, and synchronous-proxy types via trait impls.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionMarker;
1809
impl fidl::endpoints::ProtocolMarker for BufferCollectionMarker {
    type Proxy = BufferCollectionProxy;
    type RequestStream = BufferCollectionRequestStream;
    // Synchronous clients exist only on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionSynchronousProxy;

    // Name used in debug/error output. NOTE(review): "(anonymous)" presumably
    // means the protocol has no discoverable name — confirm against the FIDL source.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollection";
}
/// Result type of `BufferCollection.WaitForAllBuffersAllocated`: the buffer
/// info on success, a sysmem [`Error`] otherwise.
pub type BufferCollectionWaitForAllBuffersAllocatedResult =
    Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>;
/// Result type of `BufferCollection.CheckAllBuffersAllocated`.
pub type BufferCollectionCheckAllBuffersAllocatedResult = Result<(), Error>;
1821
/// Client-side abstraction over the `BufferCollection` protocol. Two-way
/// methods return an associated `*ResponseFut` future; one-way methods return
/// `Result<(), fidl::Error>` directly.
pub trait BufferCollectionProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    // Methods below are specific to BufferCollection (the ones above take
    // Node-level request payloads).
    fn r#set_constraints(
        &self,
        payload: BufferCollectionSetConstraintsRequest,
    ) -> Result<(), fidl::Error>;
    type WaitForAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut;
    type CheckAllBuffersAllocatedResponseFut: std::future::Future<
            Output = Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error>,
        > + Send;
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut;
    fn r#attach_token(
        &self,
        payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error>;
    fn r#attach_lifetime_tracking(
        &self,
        payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `BufferCollection` protocol;
/// available only on Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionSynchronousProxy {
    // Blocking FIDL client owning the underlying channel.
    client: fidl::client::sync::Client,
}
1880
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionSynchronousProxy {
    type Proxy = BufferCollectionProxy;
    type Protocol = BufferCollectionMarker;

    // Wraps a raw channel in a synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Consumes the proxy, recovering the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    // Borrows the underlying channel.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
1898
1899#[cfg(target_os = "fuchsia")]
1900impl BufferCollectionSynchronousProxy {
1901    pub fn new(channel: fidl::Channel) -> Self {
1902        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
1903        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
1904    }
1905
1906    pub fn into_channel(self) -> fidl::Channel {
1907        self.client.into_channel()
1908    }
1909
1910    /// Waits until an event arrives and returns it. It is safe for other
1911    /// threads to make concurrent requests while waiting for an event.
1912    pub fn wait_for_event(
1913        &self,
1914        deadline: zx::MonotonicInstant,
1915    ) -> Result<BufferCollectionEvent, fidl::Error> {
1916        BufferCollectionEvent::decode(self.client.wait_for_event(deadline)?)
1917    }
1918
1919    /// Ensure that previous messages have been received server side. This is
1920    /// particularly useful after previous messages that created new tokens,
1921    /// because a token must be known to the sysmem server before sending the
1922    /// token to another participant.
1923    ///
1924    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
1925    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
1926    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
1927    /// to mitigate the possibility of a hostile/fake
1928    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
1929    /// Another way is to pass the token to
1930    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
1931    /// the token as part of exchanging it for a
1932    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
1933    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
1934    /// of stalling.
1935    ///
1936    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
1937    /// and then starting and completing a `Sync`, it's then safe to send the
1938    /// `BufferCollectionToken` client ends to other participants knowing the
1939    /// server will recognize the tokens when they're sent by the other
1940    /// participants to sysmem in a
1941    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
1942    /// efficient way to create tokens while avoiding unnecessary round trips.
1943    ///
1944    /// Other options include waiting for each
1945    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
1946    /// individually (using separate call to `Sync` after each), or calling
1947    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
1948    /// converted to a `BufferCollection` via
1949    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
1950    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
1951    /// the sync step and can create multiple tokens at once.
1952    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
1953        let _response = self.client.send_query::<
1954            fidl::encoding::EmptyPayload,
1955            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
1956        >(
1957            (),
1958            0x11ac2555cf575b54,
1959            fidl::encoding::DynamicFlags::FLEXIBLE,
1960            ___deadline,
1961        )?
1962        .into_result::<BufferCollectionMarker>("sync")?;
1963        Ok(_response)
1964    }
1965
1966    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
1967    ///
1968    /// Normally a participant will convert a `BufferCollectionToken` into a
1969    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
1970    /// `Release` via the token (and then close the channel immediately or
1971    /// shortly later in response to server closing the server end), which
1972    /// avoids causing buffer collection failure. Without a prior `Release`,
1973    /// closing the `BufferCollectionToken` client end will cause buffer
1974    /// collection failure.
1975    ///
1976    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
1977    ///
1978    /// By default the server handles unexpected closure of a
1979    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
1980    /// first) by failing the buffer collection. Partly this is to expedite
1981    /// closing VMO handles to reclaim memory when any participant fails. If a
1982    /// participant would like to cleanly close a `BufferCollection` without
1983    /// causing buffer collection failure, the participant can send `Release`
1984    /// before closing the `BufferCollection` client end. The `Release` can
1985    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
1986    /// buffer collection won't require constraints from this node in order to
1987    /// allocate. If after `SetConstraints`, the constraints are retained and
1988    /// aggregated, despite the lack of `BufferCollection` connection at the
1989    /// time of constraints aggregation.
1990    ///
1991    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
1992    ///
1993    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
1994    /// end (without `Release` first) will trigger failure of the buffer
1995    /// collection. To close a `BufferCollectionTokenGroup` channel without
1996    /// failing the buffer collection, ensure that AllChildrenPresent() has been
1997    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
1998    /// client end.
1999    ///
2000    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2002    /// buffer collection will fail (triggered by reception of `Release` without
2003    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2004    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2005    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2006    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2007    /// close requires `AllChildrenPresent` (if not already sent), then
2008    /// `Release`, then close client end.
2009    ///
2010    /// If `Release` occurs after `AllChildrenPresent`, the children and all
2011    /// their constraints remain intact (just as they would if the
2012    /// `BufferCollectionTokenGroup` channel had remained open), and the client
2013    /// end close doesn't trigger buffer collection failure.
2014    ///
2015    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2016    ///
2017    /// For brevity, the per-channel-protocol paragraphs above ignore the
2018    /// separate failure domain created by
2019    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2020    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2021    /// unexpectedly closes (without `Release` first) and that client end is
2022    /// under a failure domain, instead of failing the whole buffer collection,
2023    /// the failure domain is failed, but the buffer collection itself is
2024    /// isolated from failure of the failure domain. Such failure domains can be
2025    /// nested, in which case only the inner-most failure domain in which the
2026    /// `Node` resides fails.
2027    pub fn r#release(&self) -> Result<(), fidl::Error> {
2028        self.client.send::<fidl::encoding::EmptyPayload>(
2029            (),
2030            0x6a5cae7d6d6e04c6,
2031            fidl::encoding::DynamicFlags::FLEXIBLE,
2032        )
2033    }
2034
2035    /// Set a name for VMOs in this buffer collection.
2036    ///
2037    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2038    /// will be truncated to fit. The name of the vmo will be suffixed with the
2039    /// buffer index within the collection (if the suffix fits within
2040    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2041    /// listed in the inspect data.
2042    ///
2043    /// The name only affects VMOs allocated after the name is set; this call
2044    /// does not rename existing VMOs. If multiple clients set different names
2045    /// then the larger priority value will win. Setting a new name with the
2046    /// same priority as a prior name doesn't change the name.
2047    ///
2048    /// All table fields are currently required.
2049    ///
2050    /// + request `priority` The name is only set if this is the first `SetName`
2051    ///   or if `priority` is greater than any previous `priority` value in
2052    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
2053    /// + request `name` The name for VMOs created under this buffer collection.
2054    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2055        self.client.send::<NodeSetNameRequest>(
2056            payload,
2057            0xb41f1624f48c1e9,
2058            fidl::encoding::DynamicFlags::FLEXIBLE,
2059        )
2060    }
2061
2062    /// Set information about the current client that can be used by sysmem to
2063    /// help diagnose leaking memory and allocation stalls waiting for a
2064    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2065    ///
2066    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
2068    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2069    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2070    ///
2071    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2072    /// `Allocator` is the most efficient way to ensure that all
2073    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2074    /// set, and is also more efficient than separately sending the same debug
2075    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2076    /// created [`fuchsia.sysmem2/Node`].
2077    ///
2078    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2079    /// indicate which client is closing their channel first, leading to subtree
2080    /// failure (which can be normal if the purpose of the subtree is over, but
2081    /// if happening earlier than expected, the client-channel-specific name can
2082    /// help diagnose where the failure is first coming from, from sysmem's
2083    /// point of view).
2084    ///
2085    /// All table fields are currently required.
2086    ///
2087    /// + request `name` This can be an arbitrary string, but the current
2088    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
2089    /// + request `id` This can be an arbitrary id, but the current process ID
2090    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
2091    pub fn r#set_debug_client_info(
2092        &self,
2093        mut payload: &NodeSetDebugClientInfoRequest,
2094    ) -> Result<(), fidl::Error> {
2095        self.client.send::<NodeSetDebugClientInfoRequest>(
2096            payload,
2097            0x5cde8914608d99b1,
2098            fidl::encoding::DynamicFlags::FLEXIBLE,
2099        )
2100    }
2101
2102    /// Sysmem logs a warning if sysmem hasn't seen
2103    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
2104    /// within 5 seconds after creation of a new collection.
2105    ///
2106    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
2108    /// take effect.
2109    ///
2110    /// In most cases the default works well.
2111    ///
2112    /// All table fields are currently required.
2113    ///
2114    /// + request `deadline` The time at which sysmem will start trying to log
2115    ///   the warning, unless all constraints are with sysmem by then.
2116    pub fn r#set_debug_timeout_log_deadline(
2117        &self,
2118        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
2119    ) -> Result<(), fidl::Error> {
2120        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
2121            payload,
2122            0x716b0af13d5c0806,
2123            fidl::encoding::DynamicFlags::FLEXIBLE,
2124        )
2125    }
2126
2127    /// This enables verbose logging for the buffer collection.
2128    ///
2129    /// Verbose logging includes constraints set via
2130    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
2131    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
2132    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
2133    /// the tree of `Node`(s).
2134    ///
2135    /// Normally sysmem prints only a single line complaint when aggregation
2136    /// fails, with just the specific detailed reason that aggregation failed,
2137    /// with little surrounding context.  While this is often enough to diagnose
2138    /// a problem if only a small change was made and everything was working
2139    /// before the small change, it's often not particularly helpful for getting
2140    /// a new buffer collection to work for the first time.  Especially with
2141    /// more complex trees of nodes, involving things like
2142    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
2143    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
2144    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
2145    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
2146    /// looks like and why it's failing a logical allocation, or why a tree or
2147    /// subtree is failing sooner than expected.
2148    ///
2149    /// The intent of the extra logging is to be acceptable from a performance
2150    /// point of view, under the assumption that verbose logging is only enabled
2151    /// on a low number of buffer collections. If we're not tracking down a bug,
2152    /// we shouldn't send this message.
2153    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
2154        self.client.send::<fidl::encoding::EmptyPayload>(
2155            (),
2156            0x5209c77415b4dfad,
2157            fidl::encoding::DynamicFlags::FLEXIBLE,
2158        )
2159    }
2160
2161    /// This gets a handle that can be used as a parameter to
2162    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
2163    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
2164    /// client obtained this handle from this `Node`.
2165    ///
2166    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
2167    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
2168    /// despite the two calls typically being on different channels.
2169    ///
2170    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
2171    ///
2172    /// All table fields are currently required.
2173    ///
2174    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
2175    ///   different `Node` channel, to prove that the client obtained the handle
2176    ///   from this `Node`.
2177    pub fn r#get_node_ref(
2178        &self,
2179        ___deadline: zx::MonotonicInstant,
2180    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
2181        let _response = self.client.send_query::<
2182            fidl::encoding::EmptyPayload,
2183            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
2184        >(
2185            (),
2186            0x5b3d0e51614df053,
2187            fidl::encoding::DynamicFlags::FLEXIBLE,
2188            ___deadline,
2189        )?
2190        .into_result::<BufferCollectionMarker>("get_node_ref")?;
2191        Ok(_response)
2192    }
2193
2194    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
2195    /// rooted at a different child token of a common parent
2196    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
2197    /// passed-in `node_ref`.
2198    ///
2199    /// This call is for assisting with admission control de-duplication, and
2200    /// with debugging.
2201    ///
2202    /// The `node_ref` must be obtained using
2203    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
2204    ///
2205    /// The `node_ref` can be a duplicated handle; it's not necessary to call
2206    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
2207    ///
2208    /// If a calling token may not actually be a valid token at all due to a
2209    /// potentially hostile/untrusted provider of the token, call
2210    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
2211    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
2212    /// never responds due to a calling token not being a real token (not really
2213    /// talking to sysmem).  Another option is to call
2214    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
2215    /// which also validates the token along with converting it to a
2216    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
2217    ///
2218    /// All table fields are currently required.
2219    ///
2220    /// - response `is_alternate`
2221    ///   - true: The first parent node in common between the calling node and
2222    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
2223    ///     that the calling `Node` and the `node_ref` `Node` will not have both
2224    ///     their constraints apply - rather sysmem will choose one or the other
2225    ///     of the constraints - never both.  This is because only one child of
2226    ///     a `BufferCollectionTokenGroup` is selected during logical
2227    ///     allocation, with only that one child's subtree contributing to
2228    ///     constraints aggregation.
2229    ///   - false: The first parent node in common between the calling `Node`
2230    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
2231    ///     Currently, this means the first parent node in common is a
2232    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
2233    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
2234    ///     `Node` may have both their constraints apply during constraints
2235    ///     aggregation of the logical allocation, if both `Node`(s) are
2236    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
2237    ///     this case, there is no `BufferCollectionTokenGroup` that will
2238    ///     directly prevent the two `Node`(s) from both being selected and
2239    ///     their constraints both aggregated, but even when false, one or both
2240    ///     `Node`(s) may still be eliminated from consideration if one or both
2241    ///     `Node`(s) has a direct or indirect parent
2242    ///     `BufferCollectionTokenGroup` which selects a child subtree other
2243    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
2244    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
2245    ///   associated with the same buffer collection as the calling `Node`.
2246    ///   Another reason for this error is if the `node_ref` is an
2247    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
2248    ///   a real `node_ref` obtained from `GetNodeRef`.
2249    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
2251    ///   the needed rights expected on a real `node_ref`.
2252    /// * No other failing status codes are returned by this call.  However,
2253    ///   sysmem may add additional codes in future, so the client should have
2254    ///   sensible default handling for any failing status code.
2255    pub fn r#is_alternate_for(
2256        &self,
2257        mut payload: NodeIsAlternateForRequest,
2258        ___deadline: zx::MonotonicInstant,
2259    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
2260        let _response = self.client.send_query::<
2261            NodeIsAlternateForRequest,
2262            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
2263        >(
2264            &mut payload,
2265            0x3a58e00157e0825,
2266            fidl::encoding::DynamicFlags::FLEXIBLE,
2267            ___deadline,
2268        )?
2269        .into_result::<BufferCollectionMarker>("is_alternate_for")?;
2270        Ok(_response.map(|x| x))
2271    }
2272
2273    /// Get the buffer collection ID. This ID is also available from
2274    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
2275    /// within the collection).
2276    ///
2277    /// This call is mainly useful in situations where we can't convey a
2278    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
2279    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
2280    /// handle, which can be joined back up with a `BufferCollection` client end
2281    /// that was created via a different path. Prefer to convey a
2282    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
2283    ///
2284    /// Trusting a `buffer_collection_id` value from a source other than sysmem
2285    /// is analogous to trusting a koid value from a source other than zircon.
2286    /// Both should be avoided unless really necessary, and both require
2287    /// caution. In some situations it may be reasonable to refer to a
2288    /// pre-established `BufferCollection` by `buffer_collection_id` via a
2289    /// protocol for efficiency reasons, but an incoming value purporting to be
2290    /// a `buffer_collection_id` is not sufficient alone to justify granting the
2291    /// sender of the `buffer_collection_id` any capability. The sender must
2292    /// first prove to a receiver that the sender has/had a VMO or has/had a
2293    /// `BufferCollectionToken` to the same collection by sending a handle that
2294    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
2295    /// `buffer_collection_id` value. The receiver should take care to avoid
2296    /// assuming that a sender had a `BufferCollectionToken` in cases where the
2297    /// sender has only proven that the sender had a VMO.
2298    ///
2299    /// - response `buffer_collection_id` This ID is unique per buffer
2300    ///   collection per boot. Each buffer is uniquely identified by the
2301    ///   `buffer_collection_id` and `buffer_index` together.
2302    pub fn r#get_buffer_collection_id(
2303        &self,
2304        ___deadline: zx::MonotonicInstant,
2305    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
2306        let _response = self.client.send_query::<
2307            fidl::encoding::EmptyPayload,
2308            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
2309        >(
2310            (),
2311            0x77d19a494b78ba8c,
2312            fidl::encoding::DynamicFlags::FLEXIBLE,
2313            ___deadline,
2314        )?
2315        .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
2316        Ok(_response)
2317    }
2318
2319    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
2320    /// created after this message to weak, which means that a client's `Node`
2321    /// client end (or a child created after this message) is not alone
2322    /// sufficient to keep allocated VMOs alive.
2323    ///
2324    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
2325    /// `close_weak_asap`.
2326    ///
2327    /// This message is only permitted before the `Node` becomes ready for
2328    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
2329    ///   * `BufferCollectionToken`: any time
2330    ///   * `BufferCollection`: before `SetConstraints`
2331    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
2332    ///
2333    /// Currently, no conversion from strong `Node` to weak `Node` after ready
2334    /// for allocation is provided, but a client can simulate that by creating
2335    /// an additional `Node` before allocation and setting that additional
2336    /// `Node` to weak, and then potentially at some point later sending
2337    /// `Release` and closing the client end of the client's strong `Node`, but
2338    /// keeping the client's weak `Node`.
2339    ///
2340    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
2341    /// collection failure (all `Node` client end(s) will see
2342    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
2343    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
2344    /// this situation until all `Node`(s) are ready for allocation. For initial
2345    /// allocation to succeed, at least one strong `Node` is required to exist
2346    /// at allocation time, but after that client receives VMO handles, that
2347    /// client can `BufferCollection.Release` and close the client end without
2348    /// causing this type of failure.
2349    ///
2350    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
2351    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
2352    /// separately as appropriate.
2353    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
2354        self.client.send::<fidl::encoding::EmptyPayload>(
2355            (),
2356            0x22dd3ea514eeffe1,
2357            fidl::encoding::DynamicFlags::FLEXIBLE,
2358        )
2359    }
2360
2361    /// This indicates to sysmem that the client is prepared to pay attention to
2362    /// `close_weak_asap`.
2363    ///
2364    /// If sent, this message must be before
2365    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
2366    ///
2367    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
2368    /// send this message before `WaitForAllBuffersAllocated`, or a parent
2369    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
2370    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
2371    /// trigger buffer collection failure.
2372    ///
2373    /// This message is necessary because weak sysmem VMOs have not always been
2374    /// a thing, so older clients are not aware of the need to pay attention to
2375    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
2376    /// sysmem weak VMO handles asap. By having this message and requiring
2377    /// participants to indicate their acceptance of this aspect of the overall
2378    /// protocol, we avoid situations where an older client is delivered a weak
2379    /// VMO without any way for sysmem to get that VMO to close quickly later
2380    /// (and on a per-buffer basis).
2381    ///
2382    /// A participant that doesn't handle `close_weak_asap` and also doesn't
2383    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
2384    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
2385    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
2386    /// same participant has a child/delegate which does retrieve VMOs, that
2387    /// child/delegate will need to send `SetWeakOk` before
2388    /// `WaitForAllBuffersAllocated`.
2389    ///
2390    /// + request `for_child_nodes_also` If present and true, this means direct
2391    ///   child nodes of this node created after this message plus all
2392    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
2393    ///   those nodes. Any child node of this node that was created before this
2394    ///   message is not included. This setting is "sticky" in the sense that a
2395    ///   subsequent `SetWeakOk` without this bool set to true does not reset
2396    ///   the server-side bool. If this creates a problem for a participant, a
2397    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
2398    ///   tokens instead, as appropriate. A participant should only set
2399    ///   `for_child_nodes_also` true if the participant can really promise to
2400    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
2401    ///   weak VMO handles held by participants holding the corresponding child
2402    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
2403    ///   which are using sysmem(1) can be weak, despite the clients of those
2404    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
2405    ///   direct way to find out about `close_weak_asap`. This only applies to
2406    ///   descendents of this `Node` which are using sysmem(1), not to this
2407    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
2408    ///   token, which will fail allocation unless an ancestor of this `Node`
2409    ///   specified `for_child_nodes_also` true.
2410    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
2411        self.client.send::<NodeSetWeakOkRequest>(
2412            &mut payload,
2413            0x38a44fc4d7724be9,
2414            fidl::encoding::DynamicFlags::FLEXIBLE,
2415        )
2416    }
2417
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
2420    /// reservation by a different `Node` via
2421    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
2422    ///
2423    /// The `Node` buffer counts may not be released until the entire tree of
2424    /// `Node`(s) is closed or failed, because
2425    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
2426    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
2427    /// `Node` buffer counts remain reserved until the orphaned node is later
2428    /// cleaned up.
2429    ///
2430    /// If the `Node` exceeds a fairly large number of attached eventpair server
2431    /// ends, a log message will indicate this and the `Node` (and the
2432    /// appropriate) sub-tree will fail.
2433    ///
2434    /// The `server_end` will remain open when
2435    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
2436    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
2437    /// [`fuchsia.sysmem2/BufferCollection`].
2438    ///
2439    /// This message can also be used with a
2440    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
2441    pub fn r#attach_node_tracking(
2442        &self,
2443        mut payload: NodeAttachNodeTrackingRequest,
2444    ) -> Result<(), fidl::Error> {
2445        self.client.send::<NodeAttachNodeTrackingRequest>(
2446            &mut payload,
2447            0x3f22f2a293d3cdac,
2448            fidl::encoding::DynamicFlags::FLEXIBLE,
2449        )
2450    }
2451
2452    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
2453    /// collection.
2454    ///
2455    /// A participant may only call
2456    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
2457    /// [`fuchsia.sysmem2/BufferCollection`].
2458    ///
2459    /// For buffer allocation to be attempted, all holders of a
2460    /// `BufferCollection` client end need to call `SetConstraints` before
2461    /// sysmem will attempt to allocate buffers.
2462    ///
2463    /// + request `constraints` These are the constraints on the buffer
2464    ///   collection imposed by the sending client/participant.  The
2465    ///   `constraints` field is not required to be set. If not set, the client
2466    ///   is not setting any actual constraints, but is indicating that the
2467    ///   client has no constraints to set. A client that doesn't set the
2468    ///   `constraints` field won't receive any VMO handles, but can still find
2469    ///   out how many buffers were allocated and can still refer to buffers by
2470    ///   their `buffer_index`.
2471    pub fn r#set_constraints(
2472        &self,
2473        mut payload: BufferCollectionSetConstraintsRequest,
2474    ) -> Result<(), fidl::Error> {
2475        self.client.send::<BufferCollectionSetConstraintsRequest>(
2476            &mut payload,
2477            0x1fde0f19d650197b,
2478            fidl::encoding::DynamicFlags::FLEXIBLE,
2479        )
2480    }
2481
2482    /// Wait until all buffers are allocated.
2483    ///
2484    /// This FIDL call completes when buffers have been allocated, or completes
2485    /// with some failure detail if allocation has been attempted but failed.
2486    ///
2487    /// The following must occur before buffers will be allocated:
2488    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
2489    ///     collection must be turned in via `BindSharedCollection` to get a
2490    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
2491    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
2492    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
2493    ///     to them.
2494    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
2495    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
2496    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
2497    ///     sent to them.
2498    ///
2499    /// - result `buffer_collection_info` The VMO handles and other related
2500    ///   info.
2501    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
2502    ///   cannot be fulfilled due to resource exhaustion.
2503    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
2504    ///   malformed.
2505    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
2506    ///   request is valid but cannot be satisfied, perhaps due to hardware
2507    ///   limitations. This can happen if participants have incompatible
2508    ///   constraints (empty intersection, roughly speaking). See the log for
2509    ///   more info. In cases where a participant could potentially be treated
2510    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
2511    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
2512    ///   error code if there aren't enough buffers in the pre-existing
2513    ///   collection to satisfy the constraints set on the attached token and
2514    ///   any sub-tree of tokens derived from the attached token.
2515    pub fn r#wait_for_all_buffers_allocated(
2516        &self,
2517        ___deadline: zx::MonotonicInstant,
2518    ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
2519        let _response = self
2520            .client
2521            .send_query::<fidl::encoding::EmptyPayload, fidl::encoding::FlexibleResultType<
2522                BufferCollectionWaitForAllBuffersAllocatedResponse,
2523                Error,
2524            >>(
2525                (), 0x62300344b61404e, fidl::encoding::DynamicFlags::FLEXIBLE, ___deadline
2526            )?
2527            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
2528        Ok(_response.map(|x| x))
2529    }
2530
2531    /// Checks whether all the buffers have been allocated, in a polling
2532    /// fashion.
2533    ///
2534    /// * If the buffer collection has been allocated, returns success.
2535    /// * If the buffer collection failed allocation, returns the same
2536    ///   [`fuchsia.sysmem2/Error`] as
2537    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
2538    ///   return.
2539    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
2540    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
2541    ///   would not respond quickly.
2542    pub fn r#check_all_buffers_allocated(
2543        &self,
2544        ___deadline: zx::MonotonicInstant,
2545    ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
2546        let _response = self.client.send_query::<
2547            fidl::encoding::EmptyPayload,
2548            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
2549        >(
2550            (),
2551            0x35a5fe77ce939c10,
2552            fidl::encoding::DynamicFlags::FLEXIBLE,
2553            ___deadline,
2554        )?
2555        .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
2556        Ok(_response.map(|x| x))
2557    }
2558
2559    /// Create a new token to add a new participant to an existing logical
2560    /// buffer collection, if the existing collection's buffer counts,
2561    /// constraints, and participants allow.
2562    ///
2563    /// This can be useful in replacing a failed participant, and/or in
2564    /// adding/re-adding a participant after buffers have already been
2565    /// allocated.
2566    ///
2567    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
2568    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
2569    /// goes through the normal procedure of setting constraints or closing
2570    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
2571    /// clients' point of view, despite the possibility that all the buffers
2572    /// were actually allocated previously. This process is called "logical
2573    /// allocation". Most instances of "allocation" in docs for other messages
2574    /// can also be read as "allocation or logical allocation" while remaining
2575    /// valid, but we just say "allocation" in most places for brevity/clarity
2576    /// of explanation, with the details of "logical allocation" left for the
2577    /// docs here on `AttachToken`.
2578    ///
2579    /// Failure of an attached `Node` does not propagate to the parent of the
2580    /// attached `Node`. More generally, failure of a child `Node` is blocked
2581    /// from reaching its parent `Node` if the child is attached, or if the
2582    /// child is dispensable and the failure occurred after logical allocation
2583    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
2584    ///
2585    /// A participant may in some scenarios choose to initially use a
2586    /// dispensable token for a given instance of a delegate participant, and
2587    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
2589    /// created with `AttachToken`.
2590    ///
2591    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
2592    /// client end, the token acts like any other token. The client can
2593    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
2594    /// and can send the token to a different process/participant. The
2595    /// `BufferCollectionToken` `Node` should be converted to a
2596    /// `BufferCollection` `Node` as normal by sending
2597    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
2598    /// without causing subtree failure by sending
2599    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
2600    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
2601    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
2602    /// the `BufferCollection`.
2603    ///
2604    /// Within the subtree, a success result from
2605    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
2606    /// the subtree participants' constraints were satisfiable using the
2607    /// already-existing buffer collection, the already-established
2608    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
2609    /// constraints, and the already-existing other participants (already added
2610    /// via successful logical allocation) and their specified buffer counts in
2611    /// their constraints. A failure result means the new participants'
2612    /// constraints cannot be satisfied using the existing buffer collection and
2613    /// its already-added participants. Creating a new collection instead may
2614    /// allow all participants' constraints to be satisfied, assuming
2615    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
2616    /// used.
2617    ///
2618    /// A token created with `AttachToken` performs constraints aggregation with
2619    /// all constraints currently in effect on the buffer collection, plus the
2620    /// attached token under consideration plus child tokens under the attached
2621    /// token which are not themselves an attached token or under such a token.
2622    /// Further subtrees under this subtree are considered for logical
2623    /// allocation only after this subtree has completed logical allocation.
2624    ///
2625    /// Assignment of existing buffers to participants'
2626    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
2627    /// etc is first-come first-served, but a child can't logically allocate
2628    /// before all its parents have sent `SetConstraints`.
2629    ///
2630    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
2631    /// in contrast to `AttachToken`, has the created token `Node` + child
2632    /// `Node`(s) (in the created subtree but not in any subtree under this
2633    /// subtree) participate in constraints aggregation along with its parent
2634    /// during the parent's allocation or logical allocation.
2635    ///
2636    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
2637    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
2638    /// sysmem before the new token can be passed to `BindSharedCollection`. The
2639    /// `Sync` of the new token can be accomplished with
2640    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
2641    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
2642    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
2643    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
2644    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
2645    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
2646    /// created token, to also sync those additional tokens to sysmem using a
2647    /// single round-trip.
2648    ///
2649    /// All table fields are currently required.
2650    ///
    ///   + request `rights_attenuation_mask` This allows attenuating the VMO
2652    ///   rights of the subtree. These values for `rights_attenuation_mask`
2653    ///   result in no attenuation (note that 0 is not on this list):
2654    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
2655    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
2656    /// + request `token_request` The server end of the `BufferCollectionToken`
2657    ///   channel. The client retains the client end.
2658    pub fn r#attach_token(
2659        &self,
2660        mut payload: BufferCollectionAttachTokenRequest,
2661    ) -> Result<(), fidl::Error> {
2662        self.client.send::<BufferCollectionAttachTokenRequest>(
2663            &mut payload,
2664            0x46ac7d0008492982,
2665            fidl::encoding::DynamicFlags::FLEXIBLE,
2666        )
2667    }
2668
2669    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
2670    /// buffers have been allocated and only the specified number of buffers (or
2671    /// fewer) remain in the buffer collection.
2672    ///
2673    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
2674    /// client to wait until an old buffer collection is fully or mostly
2675    /// deallocated before attempting allocation of a new buffer collection. The
2676    /// eventpair is only signalled when the buffers of this collection have
2677    /// been fully deallocated (not just un-referenced by clients, but all the
2678    /// memory consumed by those buffers has been fully reclaimed/recycled), or
2679    /// when allocation or logical allocation fails for the tree or subtree
2680    /// including this [`fuchsia.sysmem2/BufferCollection`].
2681    ///
2682    /// The eventpair won't be signalled until allocation or logical allocation
2683    /// has completed; until then, the collection's current buffer count is
2684    /// ignored.
2685    ///
2686    /// If logical allocation fails for an attached subtree (using
2687    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
2688    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
2690    /// for logical allocation consistency with normal allocation.
2691    ///
2692    /// The lifetime signalled by this event includes asynchronous cleanup of
2693    /// allocated buffers, and this asynchronous cleanup cannot occur until all
2694    /// holders of VMO handles to the buffers have closed those VMO handles.
2695    /// Therefore, clients should take care not to become blocked forever
2696    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
2697    /// participants using the logical buffer collection (including the waiter
2698    /// itself) are less trusted, less reliable, or potentially blocked by the
2699    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
2700    /// for the client wait may be prudent, depending on details of how the
2701    /// collection and/or its VMOs are used or shared. Failure to allocate a
2702    /// new/replacement buffer collection is better than getting stuck forever.
2703    ///
2704    /// The sysmem server itself intentionally does not perform any waiting on
2705    /// already-failed collections' VMOs to finish cleaning up before attempting
2706    /// a new allocation, and the sysmem server intentionally doesn't retry
2707    /// allocation if a new allocation fails due to out of memory, even if that
2708    /// failure is potentially due to continued existence of an old collection's
2709    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
2710    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
2711    /// as long as the waiting client is careful to not create a deadlock.
2712    ///
2713    /// Continued existence of old collections that are still cleaning up is not
2714    /// the only reason that a new allocation may fail due to insufficient
2715    /// memory, even if the new allocation is allocating physically contiguous
2716    /// buffers. Overall system memory pressure can also be the cause of failure
2717    /// to allocate a new collection. See also
2718    /// [`fuchsia.memorypressure/Provider`].
2719    ///
2720    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
2721    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
2722    /// `eventpair` handle (server end) can be sent via more than one
2723    /// `AttachLifetimeTracking` message to different protocols, and the
2724    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
2725    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
2727    /// client end can (also) be duplicated without preventing the
2728    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
2729    ///
2730    /// The server intentionally doesn't "trust" any signals set on the
2731    /// `server_end`. This mechanism intentionally uses only
2732    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
2733    /// "early", and is only set when all handles to the server end eventpair
2734    /// are closed. No meaning is associated with any of the other signals, and
2735    /// clients should ignore any other signal bits on either end of the
2736    /// `eventpair`.
2737    ///
2738    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
2739    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
2740    /// transfer without causing `BufferCollection` channel failure).
2741    ///
2742    /// All table fields are currently required.
2743    ///
2744    /// + request `server_end` This eventpair handle will be closed by the
2745    ///   sysmem server when buffers have been allocated initially and the
2746    ///   number of buffers is then less than or equal to `buffers_remaining`.
2747    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
2748    ///   fewer) buffers to be fully deallocated. A number greater than zero can
2749    ///   be useful in situations where a known number of buffers are
2750    ///   intentionally not closed so that the data can continue to be used,
2751    ///   such as for keeping the last available video frame displayed in the UI
2752    ///   even if the video stream was using protected output buffers. It's
2753    ///   outside the scope of the `BufferCollection` interface (at least for
2754    ///   now) to determine how many buffers may be held without closing, but
2755    ///   it'll typically be in the range 0-2.
2756    pub fn r#attach_lifetime_tracking(
2757        &self,
2758        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
2759    ) -> Result<(), fidl::Error> {
2760        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
2761            &mut payload,
2762            0x3ecb510113116dcf,
2763            fidl::encoding::DynamicFlags::FLEXIBLE,
2764        )
2765    }
2766}
2767
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionSynchronousProxy> for zx::Handle {
    /// Consumes the proxy, extracting its underlying Zircon channel and
    /// widening it to a generic handle.
    fn from(value: BufferCollectionSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
2774
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for this protocol.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionSynchronousProxy::new(value)
    }
}
2781
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollection`
/// protocol. Construct with [`BufferCollectionProxy::new`] or via the
/// `fidl::endpoints::Proxy` impl below.
#[derive(Debug, Clone)]
pub struct BufferCollectionProxy {
    // Underlying async FIDL client; the channel and event receiver are
    // accessed through it (see `into_channel` / `take_event_stream`).
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
2786
2787impl fidl::endpoints::Proxy for BufferCollectionProxy {
2788    type Protocol = BufferCollectionMarker;
2789
2790    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
2791        Self::new(inner)
2792    }
2793
2794    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
2795        self.client.into_channel().map_err(|client| Self { client })
2796    }
2797
2798    fn as_channel(&self) -> &::fidl::AsyncChannel {
2799        self.client.as_channel()
2800    }
2801}
2802
2803impl BufferCollectionProxy {
2804    /// Create a new Proxy for fuchsia.sysmem2/BufferCollection.
2805    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
2806        let protocol_name = <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
2807        Self { client: fidl::client::Client::new(channel, protocol_name) }
2808    }
2809
2810    /// Get a Stream of events from the remote end of the protocol.
2811    ///
2812    /// # Panics
2813    ///
2814    /// Panics if the event stream was already taken.
2815    pub fn take_event_stream(&self) -> BufferCollectionEventStream {
2816        BufferCollectionEventStream { event_receiver: self.client.take_event_receiver() }
2817    }
2818
2819    /// Ensure that previous messages have been received server side. This is
2820    /// particularly useful after previous messages that created new tokens,
2821    /// because a token must be known to the sysmem server before sending the
2822    /// token to another participant.
2823    ///
2824    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
2825    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
2826    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
2827    /// to mitigate the possibility of a hostile/fake
2828    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
2829    /// Another way is to pass the token to
2830    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
2831    /// the token as part of exchanging it for a
2832    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
2833    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
2834    /// of stalling.
2835    ///
2836    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
2837    /// and then starting and completing a `Sync`, it's then safe to send the
2838    /// `BufferCollectionToken` client ends to other participants knowing the
2839    /// server will recognize the tokens when they're sent by the other
2840    /// participants to sysmem in a
2841    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
2842    /// efficient way to create tokens while avoiding unnecessary round trips.
2843    ///
2844    /// Other options include waiting for each
2845    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
2846    /// individually (using separate call to `Sync` after each), or calling
2847    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
2848    /// converted to a `BufferCollection` via
2849    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
2850    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
2851    /// the sync step and can create multiple tokens at once.
2852    pub fn r#sync(
2853        &self,
2854    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
2855        BufferCollectionProxyInterface::r#sync(self)
2856    }
2857
2858    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
2859    ///
2860    /// Normally a participant will convert a `BufferCollectionToken` into a
2861    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
2862    /// `Release` via the token (and then close the channel immediately or
2863    /// shortly later in response to server closing the server end), which
2864    /// avoids causing buffer collection failure. Without a prior `Release`,
2865    /// closing the `BufferCollectionToken` client end will cause buffer
2866    /// collection failure.
2867    ///
2868    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
2869    ///
2870    /// By default the server handles unexpected closure of a
2871    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
2872    /// first) by failing the buffer collection. Partly this is to expedite
2873    /// closing VMO handles to reclaim memory when any participant fails. If a
2874    /// participant would like to cleanly close a `BufferCollection` without
2875    /// causing buffer collection failure, the participant can send `Release`
2876    /// before closing the `BufferCollection` client end. The `Release` can
2877    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
2878    /// buffer collection won't require constraints from this node in order to
2879    /// allocate. If after `SetConstraints`, the constraints are retained and
2880    /// aggregated, despite the lack of `BufferCollection` connection at the
2881    /// time of constraints aggregation.
2882    ///
2883    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
2884    ///
2885    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
2886    /// end (without `Release` first) will trigger failure of the buffer
2887    /// collection. To close a `BufferCollectionTokenGroup` channel without
2888    /// failing the buffer collection, ensure that AllChildrenPresent() has been
2889    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
2890    /// client end.
2891    ///
2892    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
2894    /// buffer collection will fail (triggered by reception of `Release` without
2895    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
2896    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
2897    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
2898    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
2899    /// close requires `AllChildrenPresent` (if not already sent), then
2900    /// `Release`, then close client end.
2901    ///
2902    /// If `Release` occurs after `AllChildrenPresent`, the children and all
2903    /// their constraints remain intact (just as they would if the
2904    /// `BufferCollectionTokenGroup` channel had remained open), and the client
2905    /// end close doesn't trigger buffer collection failure.
2906    ///
2907    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
2908    ///
2909    /// For brevity, the per-channel-protocol paragraphs above ignore the
2910    /// separate failure domain created by
2911    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
2912    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
2913    /// unexpectedly closes (without `Release` first) and that client end is
2914    /// under a failure domain, instead of failing the whole buffer collection,
2915    /// the failure domain is failed, but the buffer collection itself is
2916    /// isolated from failure of the failure domain. Such failure domains can be
2917    /// nested, in which case only the inner-most failure domain in which the
2918    /// `Node` resides fails.
2919    pub fn r#release(&self) -> Result<(), fidl::Error> {
2920        BufferCollectionProxyInterface::r#release(self)
2921    }
2922
2923    /// Set a name for VMOs in this buffer collection.
2924    ///
2925    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
2926    /// will be truncated to fit. The name of the vmo will be suffixed with the
2927    /// buffer index within the collection (if the suffix fits within
2928    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
2929    /// listed in the inspect data.
2930    ///
2931    /// The name only affects VMOs allocated after the name is set; this call
2932    /// does not rename existing VMOs. If multiple clients set different names
2933    /// then the larger priority value will win. Setting a new name with the
2934    /// same priority as a prior name doesn't change the name.
2935    ///
2936    /// All table fields are currently required.
2937    ///
2938    /// + request `priority` The name is only set if this is the first `SetName`
2939    ///   or if `priority` is greater than any previous `priority` value in
2940    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
2941    /// + request `name` The name for VMOs created under this buffer collection.
2942    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
2943        BufferCollectionProxyInterface::r#set_name(self, payload)
2944    }
2945
2946    /// Set information about the current client that can be used by sysmem to
2947    /// help diagnose leaking memory and allocation stalls waiting for a
2948    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
2949    ///
2950    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
2952    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
2953    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
2954    ///
2955    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
2956    /// `Allocator` is the most efficient way to ensure that all
2957    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
2958    /// set, and is also more efficient than separately sending the same debug
2959    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
2960    /// created [`fuchsia.sysmem2/Node`].
2961    ///
2962    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
2963    /// indicate which client is closing their channel first, leading to subtree
2964    /// failure (which can be normal if the purpose of the subtree is over, but
2965    /// if happening earlier than expected, the client-channel-specific name can
2966    /// help diagnose where the failure is first coming from, from sysmem's
2967    /// point of view).
2968    ///
2969    /// All table fields are currently required.
2970    ///
2971    /// + request `name` This can be an arbitrary string, but the current
2972    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
2973    /// + request `id` This can be an arbitrary id, but the current process ID
2974    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
2975    pub fn r#set_debug_client_info(
2976        &self,
2977        mut payload: &NodeSetDebugClientInfoRequest,
2978    ) -> Result<(), fidl::Error> {
2979        BufferCollectionProxyInterface::r#set_debug_client_info(self, payload)
2980    }
2981
2982    /// Sysmem logs a warning if sysmem hasn't seen
2983    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
2984    /// within 5 seconds after creation of a new collection.
2985    ///
2986    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
2988    /// take effect.
2989    ///
2990    /// In most cases the default works well.
2991    ///
2992    /// All table fields are currently required.
2993    ///
2994    /// + request `deadline` The time at which sysmem will start trying to log
2995    ///   the warning, unless all constraints are with sysmem by then.
2996    pub fn r#set_debug_timeout_log_deadline(
2997        &self,
2998        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
2999    ) -> Result<(), fidl::Error> {
3000        BufferCollectionProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
3001    }
3002
3003    /// This enables verbose logging for the buffer collection.
3004    ///
3005    /// Verbose logging includes constraints set via
3006    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
3007    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
3008    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
3009    /// the tree of `Node`(s).
3010    ///
3011    /// Normally sysmem prints only a single line complaint when aggregation
3012    /// fails, with just the specific detailed reason that aggregation failed,
3013    /// with little surrounding context.  While this is often enough to diagnose
3014    /// a problem if only a small change was made and everything was working
3015    /// before the small change, it's often not particularly helpful for getting
3016    /// a new buffer collection to work for the first time.  Especially with
3017    /// more complex trees of nodes, involving things like
3018    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
3019    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
3020    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
3021    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
3022    /// looks like and why it's failing a logical allocation, or why a tree or
3023    /// subtree is failing sooner than expected.
3024    ///
3025    /// The intent of the extra logging is to be acceptable from a performance
3026    /// point of view, under the assumption that verbose logging is only enabled
3027    /// on a low number of buffer collections. If we're not tracking down a bug,
3028    /// we shouldn't send this message.
3029    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3030        BufferCollectionProxyInterface::r#set_verbose_logging(self)
3031    }
3032
3033    /// This gets a handle that can be used as a parameter to
3034    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
3035    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
3036    /// client obtained this handle from this `Node`.
3037    ///
3038    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
3039    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
3040    /// despite the two calls typically being on different channels.
3041    ///
3042    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
3043    ///
3044    /// All table fields are currently required.
3045    ///
3046    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
3047    ///   different `Node` channel, to prove that the client obtained the handle
3048    ///   from this `Node`.
3049    pub fn r#get_node_ref(
3050        &self,
3051    ) -> fidl::client::QueryResponseFut<
3052        NodeGetNodeRefResponse,
3053        fidl::encoding::DefaultFuchsiaResourceDialect,
3054    > {
3055        BufferCollectionProxyInterface::r#get_node_ref(self)
3056    }
3057
3058    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
3059    /// rooted at a different child token of a common parent
3060    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
3061    /// passed-in `node_ref`.
3062    ///
3063    /// This call is for assisting with admission control de-duplication, and
3064    /// with debugging.
3065    ///
3066    /// The `node_ref` must be obtained using
3067    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
3068    ///
3069    /// The `node_ref` can be a duplicated handle; it's not necessary to call
3070    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
3071    ///
3072    /// If a calling token may not actually be a valid token at all due to a
3073    /// potentially hostile/untrusted provider of the token, call
3074    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
3075    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
3076    /// never responds due to a calling token not being a real token (not really
3077    /// talking to sysmem).  Another option is to call
3078    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
3079    /// which also validates the token along with converting it to a
3080    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
3081    ///
3082    /// All table fields are currently required.
3083    ///
3084    /// - response `is_alternate`
3085    ///   - true: The first parent node in common between the calling node and
3086    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
3087    ///     that the calling `Node` and the `node_ref` `Node` will not have both
3088    ///     their constraints apply - rather sysmem will choose one or the other
3089    ///     of the constraints - never both.  This is because only one child of
3090    ///     a `BufferCollectionTokenGroup` is selected during logical
3091    ///     allocation, with only that one child's subtree contributing to
3092    ///     constraints aggregation.
3093    ///   - false: The first parent node in common between the calling `Node`
3094    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
3095    ///     Currently, this means the first parent node in common is a
3096    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
3097    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
3098    ///     `Node` may have both their constraints apply during constraints
3099    ///     aggregation of the logical allocation, if both `Node`(s) are
3100    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
3101    ///     this case, there is no `BufferCollectionTokenGroup` that will
3102    ///     directly prevent the two `Node`(s) from both being selected and
3103    ///     their constraints both aggregated, but even when false, one or both
3104    ///     `Node`(s) may still be eliminated from consideration if one or both
3105    ///     `Node`(s) has a direct or indirect parent
3106    ///     `BufferCollectionTokenGroup` which selects a child subtree other
3107    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
3108    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
3109    ///   associated with the same buffer collection as the calling `Node`.
3110    ///   Another reason for this error is if the `node_ref` is an
3111    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
3112    ///   a real `node_ref` obtained from `GetNodeRef`.
3113    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
3115    ///   the needed rights expected on a real `node_ref`.
3116    /// * No other failing status codes are returned by this call.  However,
3117    ///   sysmem may add additional codes in future, so the client should have
3118    ///   sensible default handling for any failing status code.
3119    pub fn r#is_alternate_for(
3120        &self,
3121        mut payload: NodeIsAlternateForRequest,
3122    ) -> fidl::client::QueryResponseFut<
3123        NodeIsAlternateForResult,
3124        fidl::encoding::DefaultFuchsiaResourceDialect,
3125    > {
3126        BufferCollectionProxyInterface::r#is_alternate_for(self, payload)
3127    }
3128
3129    /// Get the buffer collection ID. This ID is also available from
3130    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
3131    /// within the collection).
3132    ///
3133    /// This call is mainly useful in situations where we can't convey a
3134    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
3135    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
3136    /// handle, which can be joined back up with a `BufferCollection` client end
3137    /// that was created via a different path. Prefer to convey a
3138    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
3139    ///
3140    /// Trusting a `buffer_collection_id` value from a source other than sysmem
3141    /// is analogous to trusting a koid value from a source other than zircon.
3142    /// Both should be avoided unless really necessary, and both require
3143    /// caution. In some situations it may be reasonable to refer to a
3144    /// pre-established `BufferCollection` by `buffer_collection_id` via a
3145    /// protocol for efficiency reasons, but an incoming value purporting to be
3146    /// a `buffer_collection_id` is not sufficient alone to justify granting the
3147    /// sender of the `buffer_collection_id` any capability. The sender must
3148    /// first prove to a receiver that the sender has/had a VMO or has/had a
3149    /// `BufferCollectionToken` to the same collection by sending a handle that
3150    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
3151    /// `buffer_collection_id` value. The receiver should take care to avoid
3152    /// assuming that a sender had a `BufferCollectionToken` in cases where the
3153    /// sender has only proven that the sender had a VMO.
3154    ///
3155    /// - response `buffer_collection_id` This ID is unique per buffer
3156    ///   collection per boot. Each buffer is uniquely identified by the
3157    ///   `buffer_collection_id` and `buffer_index` together.
3158    pub fn r#get_buffer_collection_id(
3159        &self,
3160    ) -> fidl::client::QueryResponseFut<
3161        NodeGetBufferCollectionIdResponse,
3162        fidl::encoding::DefaultFuchsiaResourceDialect,
3163    > {
3164        BufferCollectionProxyInterface::r#get_buffer_collection_id(self)
3165    }
3166
3167    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
3168    /// created after this message to weak, which means that a client's `Node`
3169    /// client end (or a child created after this message) is not alone
3170    /// sufficient to keep allocated VMOs alive.
3171    ///
3172    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
3173    /// `close_weak_asap`.
3174    ///
3175    /// This message is only permitted before the `Node` becomes ready for
3176    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
3177    ///   * `BufferCollectionToken`: any time
3178    ///   * `BufferCollection`: before `SetConstraints`
3179    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
3180    ///
3181    /// Currently, no conversion from strong `Node` to weak `Node` after ready
3182    /// for allocation is provided, but a client can simulate that by creating
3183    /// an additional `Node` before allocation and setting that additional
3184    /// `Node` to weak, and then potentially at some point later sending
3185    /// `Release` and closing the client end of the client's strong `Node`, but
3186    /// keeping the client's weak `Node`.
3187    ///
3188    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
3189    /// collection failure (all `Node` client end(s) will see
3190    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
3191    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
3192    /// this situation until all `Node`(s) are ready for allocation. For initial
3193    /// allocation to succeed, at least one strong `Node` is required to exist
3194    /// at allocation time, but after that client receives VMO handles, that
3195    /// client can `BufferCollection.Release` and close the client end without
3196    /// causing this type of failure.
3197    ///
3198    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
3199    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
3200    /// separately as appropriate.
3201    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
3202        BufferCollectionProxyInterface::r#set_weak(self)
3203    }
3204
3205    /// This indicates to sysmem that the client is prepared to pay attention to
3206    /// `close_weak_asap`.
3207    ///
3208    /// If sent, this message must be before
3209    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
3210    ///
3211    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
3212    /// send this message before `WaitForAllBuffersAllocated`, or a parent
3213    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
3214    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
3215    /// trigger buffer collection failure.
3216    ///
3217    /// This message is necessary because weak sysmem VMOs have not always been
3218    /// a thing, so older clients are not aware of the need to pay attention to
3219    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
3220    /// sysmem weak VMO handles asap. By having this message and requiring
3221    /// participants to indicate their acceptance of this aspect of the overall
3222    /// protocol, we avoid situations where an older client is delivered a weak
3223    /// VMO without any way for sysmem to get that VMO to close quickly later
3224    /// (and on a per-buffer basis).
3225    ///
3226    /// A participant that doesn't handle `close_weak_asap` and also doesn't
3227    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
3228    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
3229    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
3230    /// same participant has a child/delegate which does retrieve VMOs, that
3231    /// child/delegate will need to send `SetWeakOk` before
3232    /// `WaitForAllBuffersAllocated`.
3233    ///
3234    /// + request `for_child_nodes_also` If present and true, this means direct
3235    ///   child nodes of this node created after this message plus all
3236    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
3237    ///   those nodes. Any child node of this node that was created before this
3238    ///   message is not included. This setting is "sticky" in the sense that a
3239    ///   subsequent `SetWeakOk` without this bool set to true does not reset
3240    ///   the server-side bool. If this creates a problem for a participant, a
3241    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
3242    ///   tokens instead, as appropriate. A participant should only set
3243    ///   `for_child_nodes_also` true if the participant can really promise to
3244    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
3245    ///   weak VMO handles held by participants holding the corresponding child
3246    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
3247    ///   which are using sysmem(1) can be weak, despite the clients of those
3248    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
3249    ///   direct way to find out about `close_weak_asap`. This only applies to
3250    ///   descendents of this `Node` which are using sysmem(1), not to this
3251    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
3252    ///   token, which will fail allocation unless an ancestor of this `Node`
3253    ///   specified `for_child_nodes_also` true.
3254    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
3255        BufferCollectionProxyInterface::r#set_weak_ok(self, payload)
3256    }
3257
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    pub fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionProxyInterface::r#attach_node_tracking(self, payload)
    }
3287
3288    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
3289    /// collection.
3290    ///
3291    /// A participant may only call
3292    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
3293    /// [`fuchsia.sysmem2/BufferCollection`].
3294    ///
3295    /// For buffer allocation to be attempted, all holders of a
3296    /// `BufferCollection` client end need to call `SetConstraints` before
3297    /// sysmem will attempt to allocate buffers.
3298    ///
3299    /// + request `constraints` These are the constraints on the buffer
3300    ///   collection imposed by the sending client/participant.  The
3301    ///   `constraints` field is not required to be set. If not set, the client
3302    ///   is not setting any actual constraints, but is indicating that the
3303    ///   client has no constraints to set. A client that doesn't set the
3304    ///   `constraints` field won't receive any VMO handles, but can still find
3305    ///   out how many buffers were allocated and can still refer to buffers by
3306    ///   their `buffer_index`.
3307    pub fn r#set_constraints(
3308        &self,
3309        mut payload: BufferCollectionSetConstraintsRequest,
3310    ) -> Result<(), fidl::Error> {
3311        BufferCollectionProxyInterface::r#set_constraints(self, payload)
3312    }
3313
    /// Wait until all buffers are allocated.
    ///
    /// This FIDL call completes when buffers have been allocated, or completes
    /// with some failure detail if allocation has been attempted but failed.
    ///
    /// The following must occur before buffers will be allocated:
    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
    ///     collection must be turned in via `BindSharedCollection` to get a
    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
    ///     to them.
    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
    ///     sent to them.
    ///
    /// - result `buffer_collection_info` The VMO handles and other related
    ///   info.
    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
    ///   cannot be fulfilled due to resource exhaustion.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The request is
    ///   malformed.
    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY]` The
    ///   request is valid but cannot be satisfied, perhaps due to hardware
    ///   limitations. This can happen if participants have incompatible
    ///   constraints (empty intersection, roughly speaking). See the log for
    ///   more info. In cases where a participant could potentially be treated
    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
    ///   error code if there aren't enough buffers in the pre-existing
    ///   collection to satisfy the constraints set on the attached token and
    ///   any sub-tree of tokens derived from the attached token.
    pub fn r#wait_for_all_buffers_allocated(
        &self,
    ) -> fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        BufferCollectionProxyInterface::r#wait_for_all_buffers_allocated(self)
    }
3355
3356    /// Checks whether all the buffers have been allocated, in a polling
3357    /// fashion.
3358    ///
3359    /// * If the buffer collection has been allocated, returns success.
3360    /// * If the buffer collection failed allocation, returns the same
3361    ///   [`fuchsia.sysmem2/Error`] as
3362    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
3363    ///   return.
3364    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
3365    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
3366    ///   would not respond quickly.
3367    pub fn r#check_all_buffers_allocated(
3368        &self,
3369    ) -> fidl::client::QueryResponseFut<
3370        BufferCollectionCheckAllBuffersAllocatedResult,
3371        fidl::encoding::DefaultFuchsiaResourceDialect,
3372    > {
3373        BufferCollectionProxyInterface::r#check_all_buffers_allocated(self)
3374    }
3375
    /// Create a new token to add a new participant to an existing logical
    /// buffer collection, if the existing collection's buffer counts,
    /// constraints, and participants allow.
    ///
    /// This can be useful in replacing a failed participant, and/or in
    /// adding/re-adding a participant after buffers have already been
    /// allocated.
    ///
    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
    /// goes through the normal procedure of setting constraints or closing
    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
    /// clients' point of view, despite the possibility that all the buffers
    /// were actually allocated previously. This process is called "logical
    /// allocation". Most instances of "allocation" in docs for other messages
    /// can also be read as "allocation or logical allocation" while remaining
    /// valid, but we just say "allocation" in most places for brevity/clarity
    /// of explanation, with the details of "logical allocation" left for the
    /// docs here on `AttachToken`.
    ///
    /// Failure of an attached `Node` does not propagate to the parent of the
    /// attached `Node`. More generally, failure of a child `Node` is blocked
    /// from reaching its parent `Node` if the child is attached, or if the
    /// child is dispensable and the failure occurred after logical allocation
    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
    ///
    /// A participant may in some scenarios choose to initially use a
    /// dispensable token for a given instance of a delegate participant, and
    /// then later if the first instance of that delegate participant fails, a
    /// new second instance of that delegate participant may be given a token
    /// created with `AttachToken`.
    ///
    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
    /// client end, the token acts like any other token. The client can
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
    /// and can send the token to a different process/participant. The
    /// `BufferCollectionToken` `Node` should be converted to a
    /// `BufferCollection` `Node` as normal by sending
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
    /// without causing subtree failure by sending
    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
    /// the `BufferCollection`.
    ///
    /// Within the subtree, a success result from
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
    /// the subtree participants' constraints were satisfiable using the
    /// already-existing buffer collection, the already-established
    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
    /// constraints, and the already-existing other participants (already added
    /// via successful logical allocation) and their specified buffer counts in
    /// their constraints. A failure result means the new participants'
    /// constraints cannot be satisfied using the existing buffer collection and
    /// its already-added participants. Creating a new collection instead may
    /// allow all participants' constraints to be satisfied, assuming
    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
    /// used.
    ///
    /// A token created with `AttachToken` performs constraints aggregation with
    /// all constraints currently in effect on the buffer collection, plus the
    /// attached token under consideration plus child tokens under the attached
    /// token which are not themselves an attached token or under such a token.
    /// Further subtrees under this subtree are considered for logical
    /// allocation only after this subtree has completed logical allocation.
    ///
    /// Assignment of existing buffers to participants'
    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
    /// etc is first-come first-served, but a child can't logically allocate
    /// before all its parents have sent `SetConstraints`.
    ///
    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
    /// in contrast to `AttachToken`, has the created token `Node` + child
    /// `Node`(s) (in the created subtree but not in any subtree under this
    /// subtree) participate in constraints aggregation along with its parent
    /// during the parent's allocation or logical allocation.
    ///
    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
    /// sysmem before the new token can be passed to `BindSharedCollection`. The
    /// `Sync` of the new token can be accomplished with
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
    /// created token, to also sync those additional tokens to sysmem using a
    /// single round-trip.
    ///
    /// All table fields are currently required.
    ///
    /// + request `rights_attenuation_mask` This allows attenuating the VMO
    ///   rights of the subtree. These values for `rights_attenuation_mask`
    ///   result in no attenuation (note that 0 is not on this list):
    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
    /// + request `token_request` The server end of the `BufferCollectionToken`
    ///   channel. The client retains the client end.
    pub fn r#attach_token(
        &self,
        mut payload: BufferCollectionAttachTokenRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionProxyInterface::r#attach_token(self, payload)
    }
3481
    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
    /// buffers have been allocated and only the specified number of buffers (or
    /// fewer) remain in the buffer collection.
    ///
    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
    /// client to wait until an old buffer collection is fully or mostly
    /// deallocated before attempting allocation of a new buffer collection. The
    /// eventpair is only signalled when the buffers of this collection have
    /// been fully deallocated (not just un-referenced by clients, but all the
    /// memory consumed by those buffers has been fully reclaimed/recycled), or
    /// when allocation or logical allocation fails for the tree or subtree
    /// including this [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// The eventpair won't be signalled until allocation or logical allocation
    /// has completed; until then, the collection's current buffer count is
    /// ignored.
    ///
    /// If logical allocation fails for an attached subtree (using
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
    /// eventpair will close during that failure regardless of the number of
    /// buffers potentially allocated in the overall buffer collection. This is
    /// for logical allocation consistency with normal allocation.
    ///
    /// The lifetime signalled by this event includes asynchronous cleanup of
    /// allocated buffers, and this asynchronous cleanup cannot occur until all
    /// holders of VMO handles to the buffers have closed those VMO handles.
    /// Therefore, clients should take care not to become blocked forever
    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
    /// participants using the logical buffer collection (including the waiter
    /// itself) are less trusted, less reliable, or potentially blocked by the
    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
    /// for the client wait may be prudent, depending on details of how the
    /// collection and/or its VMOs are used or shared. Failure to allocate a
    /// new/replacement buffer collection is better than getting stuck forever.
    ///
    /// The sysmem server itself intentionally does not perform any waiting on
    /// already-failed collections' VMOs to finish cleaning up before attempting
    /// a new allocation, and the sysmem server intentionally doesn't retry
    /// allocation if a new allocation fails due to out of memory, even if that
    /// failure is potentially due to continued existence of an old collection's
    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
    /// as long as the waiting client is careful to not create a deadlock.
    ///
    /// Continued existence of old collections that are still cleaning up is not
    /// the only reason that a new allocation may fail due to insufficient
    /// memory, even if the new allocation is allocating physically contiguous
    /// buffers. Overall system memory pressure can also be the cause of failure
    /// to allocate a new collection. See also
    /// [`fuchsia.memorypressure/Provider`].
    ///
    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
    /// `eventpair` handle (server end) can be sent via more than one
    /// `AttachLifetimeTracking` message to different protocols, and the
    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
    /// the conditions are met (all holders of duplicates have closed their
    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
    /// client end can (also) be duplicated without preventing the
    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
    ///
    /// The server intentionally doesn't "trust" any signals set on the
    /// `server_end`. This mechanism intentionally uses only
    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
    /// "early", and is only set when all handles to the server end eventpair
    /// are closed. No meaning is associated with any of the other signals, and
    /// clients should ignore any other signal bits on either end of the
    /// `eventpair`.
    ///
    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
    /// transfer without causing `BufferCollection` channel failure).
    ///
    /// All table fields are currently required.
    ///
    /// + request `server_end` This eventpair handle will be closed by the
    ///   sysmem server when buffers have been allocated initially and the
    ///   number of buffers is then less than or equal to `buffers_remaining`.
    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
    ///   fewer) buffers to be fully deallocated. A number greater than zero can
    ///   be useful in situations where a known number of buffers are
    ///   intentionally not closed so that the data can continue to be used,
    ///   such as for keeping the last available video frame displayed in the UI
    ///   even if the video stream was using protected output buffers. It's
    ///   outside the scope of the `BufferCollection` interface (at least for
    ///   now) to determine how many buffers may be held without closing, but
    ///   it'll typically be in the range 0-2.
    pub fn r#attach_lifetime_tracking(
        &self,
        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionProxyInterface::r#attach_lifetime_tracking(self, payload)
    }
3575}
3576
3577impl BufferCollectionProxyInterface for BufferCollectionProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way Sync: sends an empty payload and decodes an empty response,
    // returning a future the caller awaits for the round-trip to complete.
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the response body. The flexible envelope allows the server to
        // signal an unknown-method result, which `into_result` converts to an
        // error for this marker/method name.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("sync")?;
            Ok(_response)
        }
        // Same ordinal (0x11ac2555cf575b54) is used for the request and for
        // matching the response during decode.
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
3599
3600    fn r#release(&self) -> Result<(), fidl::Error> {
3601        self.client.send::<fidl::encoding::EmptyPayload>(
3602            (),
3603            0x6a5cae7d6d6e04c6,
3604            fidl::encoding::DynamicFlags::FLEXIBLE,
3605        )
3606    }
3607
3608    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
3609        self.client.send::<NodeSetNameRequest>(
3610            payload,
3611            0xb41f1624f48c1e9,
3612            fidl::encoding::DynamicFlags::FLEXIBLE,
3613        )
3614    }
3615
3616    fn r#set_debug_client_info(
3617        &self,
3618        mut payload: &NodeSetDebugClientInfoRequest,
3619    ) -> Result<(), fidl::Error> {
3620        self.client.send::<NodeSetDebugClientInfoRequest>(
3621            payload,
3622            0x5cde8914608d99b1,
3623            fidl::encoding::DynamicFlags::FLEXIBLE,
3624        )
3625    }
3626
3627    fn r#set_debug_timeout_log_deadline(
3628        &self,
3629        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
3630    ) -> Result<(), fidl::Error> {
3631        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
3632            payload,
3633            0x716b0af13d5c0806,
3634            fidl::encoding::DynamicFlags::FLEXIBLE,
3635        )
3636    }
3637
3638    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
3639        self.client.send::<fidl::encoding::EmptyPayload>(
3640            (),
3641            0x5209c77415b4dfad,
3642            fidl::encoding::DynamicFlags::FLEXIBLE,
3643        )
3644    }
3645
    // Future type returned by `get_node_ref`; resolves to the response table.
    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Sends the two-way `GetNodeRef` request (empty payload) and returns a
    /// future that decodes the flexible response envelope into
    /// [`NodeGetNodeRefResponse`].
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decode callback: unwraps the flexible envelope for ordinal
        // 0x5b3d0e51614df053, converting framework errors into `fidl::Error`.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
3669
    // Future type returned by `is_alternate_for`; resolves to a domain result.
    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Sends the two-way `IsAlternateFor` request and returns a future that
    /// decodes the flexible result envelope into either the response table or
    /// the protocol's domain `Error`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Decode callback: unwraps the flexible result envelope for ordinal
        // 0x3a58e00157e0825 (success table or domain error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
3696
    // Future type returned by `get_buffer_collection_id`.
    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Sends the two-way `GetBufferCollectionId` request (empty payload) and
    /// returns a future that decodes the flexible response envelope into
    /// [`NodeGetBufferCollectionIdResponse`].
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Decode callback: unwraps the flexible envelope for ordinal
        // 0x77d19a494b78ba8c.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
3723
3724    fn r#set_weak(&self) -> Result<(), fidl::Error> {
3725        self.client.send::<fidl::encoding::EmptyPayload>(
3726            (),
3727            0x22dd3ea514eeffe1,
3728            fidl::encoding::DynamicFlags::FLEXIBLE,
3729        )
3730    }
3731
3732    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
3733        self.client.send::<NodeSetWeakOkRequest>(
3734            &mut payload,
3735            0x38a44fc4d7724be9,
3736            fidl::encoding::DynamicFlags::FLEXIBLE,
3737        )
3738    }
3739
3740    fn r#attach_node_tracking(
3741        &self,
3742        mut payload: NodeAttachNodeTrackingRequest,
3743    ) -> Result<(), fidl::Error> {
3744        self.client.send::<NodeAttachNodeTrackingRequest>(
3745            &mut payload,
3746            0x3f22f2a293d3cdac,
3747            fidl::encoding::DynamicFlags::FLEXIBLE,
3748        )
3749    }
3750
3751    fn r#set_constraints(
3752        &self,
3753        mut payload: BufferCollectionSetConstraintsRequest,
3754    ) -> Result<(), fidl::Error> {
3755        self.client.send::<BufferCollectionSetConstraintsRequest>(
3756            &mut payload,
3757            0x1fde0f19d650197b,
3758            fidl::encoding::DynamicFlags::FLEXIBLE,
3759        )
3760    }
3761
    // Future type returned by `wait_for_all_buffers_allocated`; resolves to a
    // domain result.
    type WaitForAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionWaitForAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Sends the two-way `WaitForAllBuffersAllocated` request (empty payload)
    /// and returns a future that decodes the flexible result envelope into
    /// either the allocation response table or the protocol's domain `Error`.
    fn r#wait_for_all_buffers_allocated(&self) -> Self::WaitForAllBuffersAllocatedResponseFut {
        // Decode callback: unwraps the flexible result envelope for ordinal
        // 0x62300344b61404e (success table or domain error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionWaitForAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    BufferCollectionWaitForAllBuffersAllocatedResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x62300344b61404e,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("wait_for_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionWaitForAllBuffersAllocatedResult,
        >(
            (),
            0x62300344b61404e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
3791
    // Future type returned by `check_all_buffers_allocated`; resolves to a
    // domain result.
    type CheckAllBuffersAllocatedResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionCheckAllBuffersAllocatedResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    /// Sends the two-way `CheckAllBuffersAllocated` request (empty payload) and
    /// returns a future that decodes the flexible result envelope — the success
    /// side is an empty struct, so the result carries only the domain `Error`
    /// on failure.
    fn r#check_all_buffers_allocated(&self) -> Self::CheckAllBuffersAllocatedResponseFut {
        // Decode callback: unwraps the flexible result envelope for ordinal
        // 0x35a5fe77ce939c10 (empty success or domain error).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionCheckAllBuffersAllocatedResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35a5fe77ce939c10,
            >(_buf?)?
            .into_result::<BufferCollectionMarker>("check_all_buffers_allocated")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            BufferCollectionCheckAllBuffersAllocatedResult,
        >(
            (),
            0x35a5fe77ce939c10,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }
3818
3819    fn r#attach_token(
3820        &self,
3821        mut payload: BufferCollectionAttachTokenRequest,
3822    ) -> Result<(), fidl::Error> {
3823        self.client.send::<BufferCollectionAttachTokenRequest>(
3824            &mut payload,
3825            0x46ac7d0008492982,
3826            fidl::encoding::DynamicFlags::FLEXIBLE,
3827        )
3828    }
3829
3830    fn r#attach_lifetime_tracking(
3831        &self,
3832        mut payload: BufferCollectionAttachLifetimeTrackingRequest,
3833    ) -> Result<(), fidl::Error> {
3834        self.client.send::<BufferCollectionAttachLifetimeTrackingRequest>(
3835            &mut payload,
3836            0x3ecb510113116dcf,
3837            fidl::encoding::DynamicFlags::FLEXIBLE,
3838        )
3839    }
3840}
3841
/// A Stream of incoming events for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionEventStream {
    // Receives raw event message buffers from the client's channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
3845
// Explicitly mark the stream `Unpin` so callers can poll it without pinning.
impl std::marker::Unpin for BufferCollectionEventStream {}
3847
impl futures::stream::FusedStream for BufferCollectionEventStream {
    // Delegates termination state to the underlying event receiver.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
3853
impl futures::Stream for BufferCollectionEventStream {
    type Item = Result<BufferCollectionEvent, fidl::Error>;

    /// Polls the underlying event receiver and decodes each raw message buffer
    /// into a [`BufferCollectionEvent`]. Yields `None` when the receiver is
    /// exhausted (channel closed).
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
3870
/// Events received on a fuchsia.sysmem2/BufferCollection channel. The only
/// representable variant is an unknown (flexible) event from the peer.
#[derive(Debug)]
pub enum BufferCollectionEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
3879
impl BufferCollectionEvent {
    /// Decodes a message buffer as a [`BufferCollectionEvent`].
    ///
    /// Unrecognized ordinals with the FLEXIBLE dynamic flag are tolerated and
    /// surfaced as `_UnknownEvent`; strict unknown ordinals are an error.
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<BufferCollectionEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events carry no transaction id (tx_id == 0).
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(BufferCollectionEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name:
                    <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
3900
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollection.
pub struct BufferCollectionRequestStream {
    // Shared serving state (channel plus shutdown signal); control handles
    // created from this stream clone the same `Arc`.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; polling after that panics.
    is_terminated: bool,
}
3906
// Explicitly mark the stream `Unpin` so callers can poll it without pinning.
impl std::marker::Unpin for BufferCollectionRequestStream {}
3908
impl futures::stream::FusedStream for BufferCollectionRequestStream {
    // Reports the flag set by `poll_next` when the stream ends.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
3914
impl fidl::endpoints::RequestStream for BufferCollectionRequestStream {
    type Protocol = BufferCollectionMarker;
    type ControlHandle = BufferCollectionControlHandle;

    /// Wraps a server channel in a fresh, non-terminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Creates a control handle sharing this stream's serving state.
    fn control_handle(&self) -> Self::ControlHandle {
        BufferCollectionControlHandle { inner: self.inner.clone() }
    }

    /// Decomposes the stream into its shared state and termination flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Rebuilds a stream from parts previously produced by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
3941
impl futures::Stream for BufferCollectionRequestStream {
    type Item = Result<BufferCollectionRequest, fidl::Error>;

    /// Reads the next message from the channel, decodes its transaction
    /// header, and dispatches on the method ordinal to produce the matching
    /// [`BufferCollectionRequest`] variant. Unknown flexible methods are
    /// surfaced as `_UnknownMethod`; unknown strict methods are errors.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // End the stream (once) when the server has been shut down.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled BufferCollectionRequestStream after completion");
        }
        // Decode using thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                // Try to read one complete message (bytes + handles).
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        // Peer closure is normal end-of-stream, not an error.
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the fidlgen-assigned method ordinal.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // Sync (two-way, empty payload)
                    0x11ac2555cf575b54 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::Sync {
                            responder: BufferCollectionSyncResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Release (one-way)
                    0x6a5cae7d6d6e04c6 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::Release { control_handle })
                    }
                    // SetName (one-way)
                    0xb41f1624f48c1e9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetNameRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetName { payload: req, control_handle })
                    }
                    // SetDebugClientInfo (one-way)
                    0x5cde8914608d99b1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugClientInfoRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetDebugClientInfo {
                            payload: req,
                            control_handle,
                        })
                    }
                    // SetDebugTimeoutLogDeadline (one-way)
                    0x716b0af13d5c0806 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetDebugTimeoutLogDeadlineRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetDebugTimeoutLogDeadline {
                            payload: req,
                            control_handle,
                        })
                    }
                    // SetVerboseLogging (one-way)
                    0x5209c77415b4dfad => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetVerboseLogging { control_handle })
                    }
                    // GetNodeRef (two-way, empty payload)
                    0x5b3d0e51614df053 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::GetNodeRef {
                            responder: BufferCollectionGetNodeRefResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // IsAlternateFor (two-way)
                    0x3a58e00157e0825 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            NodeIsAlternateForRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::IsAlternateFor {
                            payload: req,
                            responder: BufferCollectionIsAlternateForResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetBufferCollectionId (two-way, empty payload)
                    0x77d19a494b78ba8c => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::GetBufferCollectionId {
                            responder: BufferCollectionGetBufferCollectionIdResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // SetWeak (one-way)
                    0x22dd3ea514eeffe1 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetWeak { control_handle })
                    }
                    // SetWeakOk (one-way)
                    0x38a44fc4d7724be9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeSetWeakOkRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetWeakOk { payload: req, control_handle })
                    }
                    // AttachNodeTracking (one-way)
                    0x3f22f2a293d3cdac => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            NodeAttachNodeTrackingRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::AttachNodeTracking {
                            payload: req,
                            control_handle,
                        })
                    }
                    // SetConstraints (one-way)
                    0x1fde0f19d650197b => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            BufferCollectionSetConstraintsRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionSetConstraintsRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::SetConstraints { payload: req, control_handle })
                    }
                    // WaitForAllBuffersAllocated (two-way, empty payload)
                    0x62300344b61404e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::WaitForAllBuffersAllocated {
                            responder: BufferCollectionWaitForAllBuffersAllocatedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // CheckAllBuffersAllocated (two-way, empty payload)
                    0x35a5fe77ce939c10 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::CheckAllBuffersAllocated {
                            responder: BufferCollectionCheckAllBuffersAllocatedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AttachToken (one-way)
                    0x46ac7d0008492982 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            BufferCollectionAttachTokenRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachTokenRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::AttachToken { payload: req, control_handle })
                    }
                    // AttachLifetimeTracking (one-way)
                    0x3ecb510113116dcf => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            BufferCollectionAttachLifetimeTrackingRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionAttachLifetimeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            BufferCollectionControlHandle { inner: this.inner.clone() };
                        Ok(BufferCollectionRequest::AttachLifetimeTracking {
                            payload: req,
                            control_handle,
                        })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface it
                    // to the server as `_UnknownMethod` instead of failing.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(BufferCollectionRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionControlHandle {
                                inner: this.inner.clone(),
                            },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with the
                    // UnknownMethod framework error, then surface it.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(BufferCollectionRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: BufferCollectionControlHandle {
                                inner: this.inner.clone(),
                            },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <BufferCollectionMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
4249
4250/// [`fuchsia.sysmem2/BufferCollection`] is a connection directly from a
4251/// participant to sysmem re. a buffer collection; often the buffer collection
4252/// is shared with other participants which have their own `BufferCollection`
4253/// client end(s) associated with the same buffer collection.  In other words,
4254/// an instance of the `BufferCollection` interface is a view of a buffer
4255/// collection, not the buffer collection itself.
4256///
4257/// The `BufferCollection` connection exists to facilitate async indication of
4258/// when the buffer collection has been populated with buffers.
4259///
4260/// Also, the channel's closure by the sysmem server is an indication to the
4261/// client that the client should close all VMO handles that were obtained from
4262/// the `BufferCollection` ASAP.
4263///
4264/// Some buffer collections can use enough memory that it can be worth avoiding
4265/// allocation overlap (in time) using
4266/// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] so that the
4267/// initiator can tell when enough buffers of the buffer collection have been
4268/// fully deallocated prior to the initiator allocating a new buffer collection.
4269///
4270/// Epitaphs are not used in this protocol.
4271#[derive(Debug)]
4272pub enum BufferCollectionRequest {
4273    /// Ensure that previous messages have been received server side. This is
4274    /// particularly useful after previous messages that created new tokens,
4275    /// because a token must be known to the sysmem server before sending the
4276    /// token to another participant.
4277    ///
4278    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
4279    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
4280    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
4281    /// to mitigate the possibility of a hostile/fake
4282    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
4283    /// Another way is to pass the token to
4284    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
4285    /// the token as part of exchanging it for a
4286    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
4287    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
4288    /// of stalling.
4289    ///
4290    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
4291    /// and then starting and completing a `Sync`, it's then safe to send the
4292    /// `BufferCollectionToken` client ends to other participants knowing the
4293    /// server will recognize the tokens when they're sent by the other
4294    /// participants to sysmem in a
4295    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
4296    /// efficient way to create tokens while avoiding unnecessary round trips.
4297    ///
4298    /// Other options include waiting for each
4299    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
4300    /// individually (using separate call to `Sync` after each), or calling
4301    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
4302    /// converted to a `BufferCollection` via
4303    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
4304    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
4305    /// the sync step and can create multiple tokens at once.
4306    Sync { responder: BufferCollectionSyncResponder },
4307    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
4308    ///
4309    /// Normally a participant will convert a `BufferCollectionToken` into a
4310    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
4311    /// `Release` via the token (and then close the channel immediately or
4312    /// shortly later in response to server closing the server end), which
4313    /// avoids causing buffer collection failure. Without a prior `Release`,
4314    /// closing the `BufferCollectionToken` client end will cause buffer
4315    /// collection failure.
4316    ///
4317    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
4318    ///
4319    /// By default the server handles unexpected closure of a
4320    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
4321    /// first) by failing the buffer collection. Partly this is to expedite
4322    /// closing VMO handles to reclaim memory when any participant fails. If a
4323    /// participant would like to cleanly close a `BufferCollection` without
4324    /// causing buffer collection failure, the participant can send `Release`
4325    /// before closing the `BufferCollection` client end. The `Release` can
4326    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
4327    /// buffer collection won't require constraints from this node in order to
4328    /// allocate. If after `SetConstraints`, the constraints are retained and
4329    /// aggregated, despite the lack of `BufferCollection` connection at the
4330    /// time of constraints aggregation.
4331    ///
4332    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
4333    ///
4334    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
4335    /// end (without `Release` first) will trigger failure of the buffer
4336    /// collection. To close a `BufferCollectionTokenGroup` channel without
4337    /// failing the buffer collection, ensure that AllChildrenPresent() has been
4338    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
4339    /// client end.
4340    ///
4341    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
4343    /// buffer collection will fail (triggered by reception of `Release` without
4344    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
4345    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
4346    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
4347    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
4348    /// close requires `AllChildrenPresent` (if not already sent), then
4349    /// `Release`, then close client end.
4350    ///
4351    /// If `Release` occurs after `AllChildrenPresent`, the children and all
4352    /// their constraints remain intact (just as they would if the
4353    /// `BufferCollectionTokenGroup` channel had remained open), and the client
4354    /// end close doesn't trigger buffer collection failure.
4355    ///
4356    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
4357    ///
4358    /// For brevity, the per-channel-protocol paragraphs above ignore the
4359    /// separate failure domain created by
4360    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
4361    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
4362    /// unexpectedly closes (without `Release` first) and that client end is
4363    /// under a failure domain, instead of failing the whole buffer collection,
4364    /// the failure domain is failed, but the buffer collection itself is
4365    /// isolated from failure of the failure domain. Such failure domains can be
4366    /// nested, in which case only the inner-most failure domain in which the
4367    /// `Node` resides fails.
4368    Release { control_handle: BufferCollectionControlHandle },
4369    /// Set a name for VMOs in this buffer collection.
4370    ///
4371    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
4372    /// will be truncated to fit. The name of the vmo will be suffixed with the
4373    /// buffer index within the collection (if the suffix fits within
4374    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
4375    /// listed in the inspect data.
4376    ///
4377    /// The name only affects VMOs allocated after the name is set; this call
4378    /// does not rename existing VMOs. If multiple clients set different names
4379    /// then the larger priority value will win. Setting a new name with the
4380    /// same priority as a prior name doesn't change the name.
4381    ///
4382    /// All table fields are currently required.
4383    ///
4384    /// + request `priority` The name is only set if this is the first `SetName`
4385    ///   or if `priority` is greater than any previous `priority` value in
4386    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
4387    /// + request `name` The name for VMOs created under this buffer collection.
4388    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionControlHandle },
4389    /// Set information about the current client that can be used by sysmem to
4390    /// help diagnose leaking memory and allocation stalls waiting for a
4391    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
4392    ///
4393    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
4395    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
4396    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
4397    ///
4398    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
4399    /// `Allocator` is the most efficient way to ensure that all
4400    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
4401    /// set, and is also more efficient than separately sending the same debug
4402    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
4403    /// created [`fuchsia.sysmem2/Node`].
4404    ///
4405    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
4406    /// indicate which client is closing their channel first, leading to subtree
4407    /// failure (which can be normal if the purpose of the subtree is over, but
4408    /// if happening earlier than expected, the client-channel-specific name can
4409    /// help diagnose where the failure is first coming from, from sysmem's
4410    /// point of view).
4411    ///
4412    /// All table fields are currently required.
4413    ///
4414    /// + request `name` This can be an arbitrary string, but the current
4415    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
4416    /// + request `id` This can be an arbitrary id, but the current process ID
4417    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
4418    SetDebugClientInfo {
4419        payload: NodeSetDebugClientInfoRequest,
4420        control_handle: BufferCollectionControlHandle,
4421    },
4422    /// Sysmem logs a warning if sysmem hasn't seen
4423    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
4424    /// within 5 seconds after creation of a new collection.
4425    ///
4426    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
4428    /// take effect.
4429    ///
4430    /// In most cases the default works well.
4431    ///
4432    /// All table fields are currently required.
4433    ///
4434    /// + request `deadline` The time at which sysmem will start trying to log
4435    ///   the warning, unless all constraints are with sysmem by then.
4436    SetDebugTimeoutLogDeadline {
4437        payload: NodeSetDebugTimeoutLogDeadlineRequest,
4438        control_handle: BufferCollectionControlHandle,
4439    },
4440    /// This enables verbose logging for the buffer collection.
4441    ///
4442    /// Verbose logging includes constraints set via
4443    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
4444    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
4445    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
4446    /// the tree of `Node`(s).
4447    ///
4448    /// Normally sysmem prints only a single line complaint when aggregation
4449    /// fails, with just the specific detailed reason that aggregation failed,
4450    /// with little surrounding context.  While this is often enough to diagnose
4451    /// a problem if only a small change was made and everything was working
4452    /// before the small change, it's often not particularly helpful for getting
4453    /// a new buffer collection to work for the first time.  Especially with
4454    /// more complex trees of nodes, involving things like
4455    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
4456    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
4457    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
4458    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
4459    /// looks like and why it's failing a logical allocation, or why a tree or
4460    /// subtree is failing sooner than expected.
4461    ///
4462    /// The intent of the extra logging is to be acceptable from a performance
4463    /// point of view, under the assumption that verbose logging is only enabled
4464    /// on a low number of buffer collections. If we're not tracking down a bug,
4465    /// we shouldn't send this message.
4466    SetVerboseLogging { control_handle: BufferCollectionControlHandle },
4467    /// This gets a handle that can be used as a parameter to
4468    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
4469    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
4470    /// client obtained this handle from this `Node`.
4471    ///
4472    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
4473    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
4474    /// despite the two calls typically being on different channels.
4475    ///
4476    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
4477    ///
4478    /// All table fields are currently required.
4479    ///
4480    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
4481    ///   different `Node` channel, to prove that the client obtained the handle
4482    ///   from this `Node`.
4483    GetNodeRef { responder: BufferCollectionGetNodeRefResponder },
4484    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
4485    /// rooted at a different child token of a common parent
4486    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
4487    /// passed-in `node_ref`.
4488    ///
4489    /// This call is for assisting with admission control de-duplication, and
4490    /// with debugging.
4491    ///
4492    /// The `node_ref` must be obtained using
4493    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
4494    ///
4495    /// The `node_ref` can be a duplicated handle; it's not necessary to call
4496    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
4497    ///
4498    /// If a calling token may not actually be a valid token at all due to a
4499    /// potentially hostile/untrusted provider of the token, call
4500    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
4501    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
4502    /// never responds due to a calling token not being a real token (not really
4503    /// talking to sysmem).  Another option is to call
4504    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
4505    /// which also validates the token along with converting it to a
4506    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
4507    ///
4508    /// All table fields are currently required.
4509    ///
4510    /// - response `is_alternate`
4511    ///   - true: The first parent node in common between the calling node and
4512    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
4513    ///     that the calling `Node` and the `node_ref` `Node` will not have both
4514    ///     their constraints apply - rather sysmem will choose one or the other
4515    ///     of the constraints - never both.  This is because only one child of
4516    ///     a `BufferCollectionTokenGroup` is selected during logical
4517    ///     allocation, with only that one child's subtree contributing to
4518    ///     constraints aggregation.
4519    ///   - false: The first parent node in common between the calling `Node`
4520    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
4521    ///     Currently, this means the first parent node in common is a
4522    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
4523    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
4524    ///     `Node` may have both their constraints apply during constraints
4525    ///     aggregation of the logical allocation, if both `Node`(s) are
4526    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
4527    ///     this case, there is no `BufferCollectionTokenGroup` that will
4528    ///     directly prevent the two `Node`(s) from both being selected and
4529    ///     their constraints both aggregated, but even when false, one or both
4530    ///     `Node`(s) may still be eliminated from consideration if one or both
4531    ///     `Node`(s) has a direct or indirect parent
4532    ///     `BufferCollectionTokenGroup` which selects a child subtree other
4533    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
4534    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
4535    ///   associated with the same buffer collection as the calling `Node`.
4536    ///   Another reason for this error is if the `node_ref` is an
4537    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
4538    ///   a real `node_ref` obtained from `GetNodeRef`.
4539    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
4541    ///   the needed rights expected on a real `node_ref`.
4542    /// * No other failing status codes are returned by this call.  However,
4543    ///   sysmem may add additional codes in future, so the client should have
4544    ///   sensible default handling for any failing status code.
4545    IsAlternateFor {
4546        payload: NodeIsAlternateForRequest,
4547        responder: BufferCollectionIsAlternateForResponder,
4548    },
4549    /// Get the buffer collection ID. This ID is also available from
4550    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
4551    /// within the collection).
4552    ///
4553    /// This call is mainly useful in situations where we can't convey a
4554    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
4555    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
4556    /// handle, which can be joined back up with a `BufferCollection` client end
4557    /// that was created via a different path. Prefer to convey a
4558    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
4559    ///
4560    /// Trusting a `buffer_collection_id` value from a source other than sysmem
4561    /// is analogous to trusting a koid value from a source other than zircon.
4562    /// Both should be avoided unless really necessary, and both require
4563    /// caution. In some situations it may be reasonable to refer to a
4564    /// pre-established `BufferCollection` by `buffer_collection_id` via a
4565    /// protocol for efficiency reasons, but an incoming value purporting to be
4566    /// a `buffer_collection_id` is not sufficient alone to justify granting the
4567    /// sender of the `buffer_collection_id` any capability. The sender must
4568    /// first prove to a receiver that the sender has/had a VMO or has/had a
4569    /// `BufferCollectionToken` to the same collection by sending a handle that
4570    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
4571    /// `buffer_collection_id` value. The receiver should take care to avoid
4572    /// assuming that a sender had a `BufferCollectionToken` in cases where the
4573    /// sender has only proven that the sender had a VMO.
4574    ///
4575    /// - response `buffer_collection_id` This ID is unique per buffer
4576    ///   collection per boot. Each buffer is uniquely identified by the
4577    ///   `buffer_collection_id` and `buffer_index` together.
4578    GetBufferCollectionId { responder: BufferCollectionGetBufferCollectionIdResponder },
4579    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
4580    /// created after this message to weak, which means that a client's `Node`
4581    /// client end (or a child created after this message) is not alone
4582    /// sufficient to keep allocated VMOs alive.
4583    ///
4584    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
4585    /// `close_weak_asap`.
4586    ///
4587    /// This message is only permitted before the `Node` becomes ready for
4588    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
4589    ///   * `BufferCollectionToken`: any time
4590    ///   * `BufferCollection`: before `SetConstraints`
4591    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
4592    ///
4593    /// Currently, no conversion from strong `Node` to weak `Node` after ready
4594    /// for allocation is provided, but a client can simulate that by creating
4595    /// an additional `Node` before allocation and setting that additional
4596    /// `Node` to weak, and then potentially at some point later sending
4597    /// `Release` and closing the client end of the client's strong `Node`, but
4598    /// keeping the client's weak `Node`.
4599    ///
4600    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
4601    /// collection failure (all `Node` client end(s) will see
4602    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
4603    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
4604    /// this situation until all `Node`(s) are ready for allocation. For initial
4605    /// allocation to succeed, at least one strong `Node` is required to exist
4606    /// at allocation time, but after that client receives VMO handles, that
4607    /// client can `BufferCollection.Release` and close the client end without
4608    /// causing this type of failure.
4609    ///
4610    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
4611    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
4612    /// separately as appropriate.
4613    SetWeak { control_handle: BufferCollectionControlHandle },
4614    /// This indicates to sysmem that the client is prepared to pay attention to
4615    /// `close_weak_asap`.
4616    ///
4617    /// If sent, this message must be before
4618    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
4619    ///
4620    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
4621    /// send this message before `WaitForAllBuffersAllocated`, or a parent
4622    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
4623    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
4624    /// trigger buffer collection failure.
4625    ///
4626    /// This message is necessary because weak sysmem VMOs have not always been
4627    /// a thing, so older clients are not aware of the need to pay attention to
4628    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
4629    /// sysmem weak VMO handles asap. By having this message and requiring
4630    /// participants to indicate their acceptance of this aspect of the overall
4631    /// protocol, we avoid situations where an older client is delivered a weak
4632    /// VMO without any way for sysmem to get that VMO to close quickly later
4633    /// (and on a per-buffer basis).
4634    ///
4635    /// A participant that doesn't handle `close_weak_asap` and also doesn't
4636    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
4637    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
4638    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
4639    /// same participant has a child/delegate which does retrieve VMOs, that
4640    /// child/delegate will need to send `SetWeakOk` before
4641    /// `WaitForAllBuffersAllocated`.
4642    ///
4643    /// + request `for_child_nodes_also` If present and true, this means direct
4644    ///   child nodes of this node created after this message plus all
4645    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
4646    ///   those nodes. Any child node of this node that was created before this
4647    ///   message is not included. This setting is "sticky" in the sense that a
4648    ///   subsequent `SetWeakOk` without this bool set to true does not reset
4649    ///   the server-side bool. If this creates a problem for a participant, a
4650    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
4651    ///   tokens instead, as appropriate. A participant should only set
4652    ///   `for_child_nodes_also` true if the participant can really promise to
4653    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
4654    ///   weak VMO handles held by participants holding the corresponding child
4655    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
4656    ///   which are using sysmem(1) can be weak, despite the clients of those
4657    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
4658    ///   direct way to find out about `close_weak_asap`. This only applies to
4659    ///   descendents of this `Node` which are using sysmem(1), not to this
4660    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
4661    ///   token, which will fail allocation unless an ancestor of this `Node`
4662    ///   specified `for_child_nodes_also` true.
4663    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
4666    /// reservation by a different `Node` via
4667    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
4668    ///
4669    /// The `Node` buffer counts may not be released until the entire tree of
4670    /// `Node`(s) is closed or failed, because
4671    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
4672    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
4673    /// `Node` buffer counts remain reserved until the orphaned node is later
4674    /// cleaned up.
4675    ///
4676    /// If the `Node` exceeds a fairly large number of attached eventpair server
4677    /// ends, a log message will indicate this and the `Node` (and the
4678    /// appropriate) sub-tree will fail.
4679    ///
4680    /// The `server_end` will remain open when
4681    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
4682    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
4683    /// [`fuchsia.sysmem2/BufferCollection`].
4684    ///
4685    /// This message can also be used with a
4686    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
4687    AttachNodeTracking {
4688        payload: NodeAttachNodeTrackingRequest,
4689        control_handle: BufferCollectionControlHandle,
4690    },
4691    /// Provide [`fuchsia.sysmem2/BufferCollectionConstraints`] to the buffer
4692    /// collection.
4693    ///
4694    /// A participant may only call
4695    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] up to once per
4696    /// [`fuchsia.sysmem2/BufferCollection`].
4697    ///
4698    /// For buffer allocation to be attempted, all holders of a
4699    /// `BufferCollection` client end need to call `SetConstraints` before
4700    /// sysmem will attempt to allocate buffers.
4701    ///
4702    /// + request `constraints` These are the constraints on the buffer
4703    ///   collection imposed by the sending client/participant.  The
4704    ///   `constraints` field is not required to be set. If not set, the client
4705    ///   is not setting any actual constraints, but is indicating that the
4706    ///   client has no constraints to set. A client that doesn't set the
4707    ///   `constraints` field won't receive any VMO handles, but can still find
4708    ///   out how many buffers were allocated and can still refer to buffers by
4709    ///   their `buffer_index`.
4710    SetConstraints {
4711        payload: BufferCollectionSetConstraintsRequest,
4712        control_handle: BufferCollectionControlHandle,
4713    },
4714    /// Wait until all buffers are allocated.
4715    ///
4716    /// This FIDL call completes when buffers have been allocated, or completes
4717    /// with some failure detail if allocation has been attempted but failed.
4718    ///
4719    /// The following must occur before buffers will be allocated:
4720    ///   * All [`fuchsia.sysmem2/BufferCollectionToken`](s) of the buffer
4721    ///     collection must be turned in via `BindSharedCollection` to get a
4722    ///     [`fuchsia.sysmem2/BufferCollection`] (for brevity, this is assuming
4723    ///     [`fuchsia.sysmem2/BufferCollection.AttachToken`] isn't being used),
4724    ///     or have had [`fuchsia.sysmem2/BufferCollectionToken.Release`] sent
4725    ///     to them.
4726    ///   * All [`fuchsia.sysmem2/BufferCollection`](s) of the buffer collection
4727    ///     must have had [`fuchsia.sysmem2/BufferCollection.SetConstraints`]
4728    ///     sent to them, or had [`fuchsia.sysmem2/BufferCollection.Release`]
4729    ///     sent to them.
4730    ///
4731    /// - result `buffer_collection_info` The VMO handles and other related
4732    ///   info.
4733    /// * error `[fuchsia.sysmem2/Error.NO_MEMORY]` The request is valid but
4734    ///   cannot be fulfilled due to resource exhaustion.
4735    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION`] The request is
4736    ///   malformed.
4737    /// * error `[fuchsia.sysmem2/Error.CONSTRAINTS_INTERSECTION_EMPTY`] The
4738    ///   request is valid but cannot be satisfied, perhaps due to hardware
4739    ///   limitations. This can happen if participants have incompatible
4740    ///   constraints (empty intersection, roughly speaking). See the log for
4741    ///   more info. In cases where a participant could potentially be treated
4742    ///   as optional, see [`BufferCollectionTokenGroup`]. When using
4743    ///   [`fuchsia.sysmem2/BufferCollection.AttachToken`], this will be the
4744    ///   error code if there aren't enough buffers in the pre-existing
4745    ///   collection to satisfy the constraints set on the attached token and
4746    ///   any sub-tree of tokens derived from the attached token.
4747    WaitForAllBuffersAllocated { responder: BufferCollectionWaitForAllBuffersAllocatedResponder },
4748    /// Checks whether all the buffers have been allocated, in a polling
4749    /// fashion.
4750    ///
4751    /// * If the buffer collection has been allocated, returns success.
4752    /// * If the buffer collection failed allocation, returns the same
4753    ///   [`fuchsia.sysmem2/Error`] as
4754    ///   [`fuchsia.sysmem2/BufferCollection/WaitForAllBuffersAllocated`] would
4755    ///   return.
4756    /// * error [`fuchsia.sysmem2/Error.PENDING`] The buffer collection hasn't
4757    ///   attempted allocation yet. This means that WaitForAllBuffersAllocated
4758    ///   would not respond quickly.
4759    CheckAllBuffersAllocated { responder: BufferCollectionCheckAllBuffersAllocatedResponder },
4760    /// Create a new token to add a new participant to an existing logical
4761    /// buffer collection, if the existing collection's buffer counts,
4762    /// constraints, and participants allow.
4763    ///
4764    /// This can be useful in replacing a failed participant, and/or in
4765    /// adding/re-adding a participant after buffers have already been
4766    /// allocated.
4767    ///
4768    /// When [`fuchsia.sysmem2/BufferCollection.AttachToken`] is used, the sub
4769    /// tree rooted at the attached [`fuchsia.sysmem2/BufferCollectionToken`]
4770    /// goes through the normal procedure of setting constraints or closing
4771    /// [`fuchsia.sysmem2/Node`](s), and then appearing to allocate buffers from
4772    /// clients' point of view, despite the possibility that all the buffers
4773    /// were actually allocated previously. This process is called "logical
4774    /// allocation". Most instances of "allocation" in docs for other messages
4775    /// can also be read as "allocation or logical allocation" while remaining
4776    /// valid, but we just say "allocation" in most places for brevity/clarity
4777    /// of explanation, with the details of "logical allocation" left for the
4778    /// docs here on `AttachToken`.
4779    ///
4780    /// Failure of an attached `Node` does not propagate to the parent of the
4781    /// attached `Node`. More generally, failure of a child `Node` is blocked
4782    /// from reaching its parent `Node` if the child is attached, or if the
4783    /// child is dispensable and the failure occurred after logical allocation
4784    /// (see [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`]).
4785    ///
4786    /// A participant may in some scenarios choose to initially use a
4787    /// dispensable token for a given instance of a delegate participant, and
4788    /// then later if the first instance of that delegate participant fails, a
4789    /// new second instance of that delegate participant may be given a token
4790    /// created with `AttachToken`.
4791    ///
4792    /// From the point of view of the [`fuchsia.sysmem2/BufferCollectionToken`]
4793    /// client end, the token acts like any other token. The client can
4794    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] the token as needed,
4795    /// and can send the token to a different process/participant. The
4796    /// `BufferCollectionToken` `Node` should be converted to a
4797    /// `BufferCollection` `Node` as normal by sending
4798    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or can be closed
4799    /// without causing subtree failure by sending
4800    /// [`fuchsia.sysmem2/BufferCollectionToken.Release`]. Assuming the former,
4801    /// the [`fuchsia.sysmem2/BufferCollection.SetConstraints`] message or
4802    /// [`fuchsia.sysmem2/BufferCollection.Release`] message should be sent to
4803    /// the `BufferCollection`.
4804    ///
4805    /// Within the subtree, a success result from
4806    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`] means
4807    /// the subtree participants' constraints were satisfiable using the
4808    /// already-existing buffer collection, the already-established
4809    /// [`fuchsia.sysmem2/BufferCollectionInfo`] including image format
4810    /// constraints, and the already-existing other participants (already added
4811    /// via successful logical allocation) and their specified buffer counts in
4812    /// their constraints. A failure result means the new participants'
4813    /// constraints cannot be satisfied using the existing buffer collection and
4814    /// its already-added participants. Creating a new collection instead may
4815    /// allow all participants' constraints to be satisfied, assuming
4816    /// `SetDispensable` is used in place of `AttachToken`, or a normal token is
4817    /// used.
4818    ///
4819    /// A token created with `AttachToken` performs constraints aggregation with
4820    /// all constraints currently in effect on the buffer collection, plus the
4821    /// attached token under consideration plus child tokens under the attached
4822    /// token which are not themselves an attached token or under such a token.
4823    /// Further subtrees under this subtree are considered for logical
4824    /// allocation only after this subtree has completed logical allocation.
4825    ///
4826    /// Assignment of existing buffers to participants'
4827    /// [`fuchsia.sysmem2/BufferCollectionConstraints.min_buffer_count_for_camping`]
4828    /// etc is first-come first-served, but a child can't logically allocate
4829    /// before all its parents have sent `SetConstraints`.
4830    ///
4831    /// See also [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`], which
4832    /// in contrast to `AttachToken`, has the created token `Node` + child
4833    /// `Node`(s) (in the created subtree but not in any subtree under this
4834    /// subtree) participate in constraints aggregation along with its parent
4835    /// during the parent's allocation or logical allocation.
4836    ///
4837    /// Similar to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], the
4838    /// newly created token needs to be [`fuchsia.sysmem2/Node.Sync`]ed to
4839    /// sysmem before the new token can be passed to `BindSharedCollection`. The
4840    /// `Sync` of the new token can be accomplished with
4841    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after converting the created
4842    /// `BufferCollectionToken` to a `BufferCollection`. Alternately,
4843    /// [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on the new token also
4844    /// works. Or using [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`]
4845    /// works. As usual, a `BufferCollectionToken.Sync` can be started after any
4846    /// `BufferCollectionToken.Duplicate` messages have been sent via the newly
4847    /// created token, to also sync those additional tokens to sysmem using a
4848    /// single round-trip.
4849    ///
4850    /// All table fields are currently required.
4851    ///
4852    /// + request `rights_attenuation_mask` This allows attenuating the VMO
4853    ///   rights of the subtree. These values for `rights_attenuation_mask`
4854    ///   result in no attenuation (note that 0 is not on this list):
4855    ///   + ZX_RIGHT_SAME_RIGHTS (preferred)
4856    ///   + 0xFFFFFFFF (this is reasonable when an attenuation mask is computed)
4857    /// + request `token_request` The server end of the `BufferCollectionToken`
4858    ///   channel. The client retains the client end.
4859    AttachToken {
4860        payload: BufferCollectionAttachTokenRequest,
4861        control_handle: BufferCollectionControlHandle,
4862    },
4863    /// Set up an eventpair to be signalled (`ZX_EVENTPAIR_PEER_CLOSED`) when
4864    /// buffers have been allocated and only the specified number of buffers (or
4865    /// fewer) remain in the buffer collection.
4866    ///
4867    /// [`fuchsia.sysmem2/BufferCollection.AttachLifetimeTracking`] allows a
4868    /// client to wait until an old buffer collection is fully or mostly
4869    /// deallocated before attempting allocation of a new buffer collection. The
4870    /// eventpair is only signalled when the buffers of this collection have
4871    /// been fully deallocated (not just un-referenced by clients, but all the
4872    /// memory consumed by those buffers has been fully reclaimed/recycled), or
4873    /// when allocation or logical allocation fails for the tree or subtree
4874    /// including this [`fuchsia.sysmem2/BufferCollection`].
4875    ///
4876    /// The eventpair won't be signalled until allocation or logical allocation
4877    /// has completed; until then, the collection's current buffer count is
4878    /// ignored.
4879    ///
4880    /// If logical allocation fails for an attached subtree (using
4881    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]), the server end of the
4882    /// eventpair will close during that failure regardless of the number of
4883    /// buffers potentially allocated in the overall buffer collection. This is
4884    /// for logical allocation consistency with normal allocation.
4885    ///
4886    /// The lifetime signalled by this event includes asynchronous cleanup of
4887    /// allocated buffers, and this asynchronous cleanup cannot occur until all
4888    /// holders of VMO handles to the buffers have closed those VMO handles.
4889    /// Therefore, clients should take care not to become blocked forever
4890    /// waiting for `ZX_EVENTPAIR_PEER_CLOSED` to be signalled if any of the
4891    /// participants using the logical buffer collection (including the waiter
4892    /// itself) are less trusted, less reliable, or potentially blocked by the
4893    /// wait itself. Waiting asynchronously is recommended. Setting a deadline
4894    /// for the client wait may be prudent, depending on details of how the
4895    /// collection and/or its VMOs are used or shared. Failure to allocate a
4896    /// new/replacement buffer collection is better than getting stuck forever.
4897    ///
4898    /// The sysmem server itself intentionally does not perform any waiting on
4899    /// already-failed collections' VMOs to finish cleaning up before attempting
4900    /// a new allocation, and the sysmem server intentionally doesn't retry
4901    /// allocation if a new allocation fails due to out of memory, even if that
4902    /// failure is potentially due to continued existence of an old collection's
4903    /// VMOs. This `AttachLifetimeTracking` message is how an initiator can
4904    /// mitigate too much overlap of old VMO lifetimes with new VMO lifetimes,
4905    /// as long as the waiting client is careful to not create a deadlock.
4906    ///
4907    /// Continued existence of old collections that are still cleaning up is not
4908    /// the only reason that a new allocation may fail due to insufficient
4909    /// memory, even if the new allocation is allocating physically contiguous
4910    /// buffers. Overall system memory pressure can also be the cause of failure
4911    /// to allocate a new collection. See also
4912    /// [`fuchsia.memorypressure/Provider`].
4913    ///
4914    /// `AttachLifetimeTracking` is meant to be compatible with other protocols
4915    /// with a similar `AttachLifetimeTracking` message; duplicates of the same
4916    /// `eventpair` handle (server end) can be sent via more than one
4917    /// `AttachLifetimeTracking` message to different protocols, and the
4918    /// `ZX_EVENTPAIR_PEER_CLOSED` will be signalled for the client end when all
4919    /// the conditions are met (all holders of duplicates have closed their
4920    /// server end handle(s)). Also, thanks to how eventpair endpoints work, the
4921    /// client end can (also) be duplicated without preventing the
4922    /// `ZX_EVENTPAIR_PEER_CLOSED` signal.
4923    ///
4924    /// The server intentionally doesn't "trust" any signals set on the
4925    /// `server_end`. This mechanism intentionally uses only
4926    /// `ZX_EVENTPAIR_PEER_CLOSED` set on the client end, which can't be set
4927    /// "early", and is only set when all handles to the server end eventpair
4928    /// are closed. No meaning is associated with any of the other signals, and
4929    /// clients should ignore any other signal bits on either end of the
4930    /// `eventpair`.
4931    ///
4932    /// The `server_end` may lack `ZX_RIGHT_SIGNAL` or `ZX_RIGHT_SIGNAL_PEER`,
4933    /// but must have `ZX_RIGHT_DUPLICATE` (and must have `ZX_RIGHT_TRANSFER` to
4934    /// transfer without causing `BufferCollection` channel failure).
4935    ///
4936    /// All table fields are currently required.
4937    ///
4938    /// + request `server_end` This eventpair handle will be closed by the
4939    ///   sysmem server when buffers have been allocated initially and the
4940    ///   number of buffers is then less than or equal to `buffers_remaining`.
4941    /// + request `buffers_remaining` Wait for all but `buffers_remaining` (or
4942    ///   fewer) buffers to be fully deallocated. A number greater than zero can
4943    ///   be useful in situations where a known number of buffers are
4944    ///   intentionally not closed so that the data can continue to be used,
4945    ///   such as for keeping the last available video frame displayed in the UI
4946    ///   even if the video stream was using protected output buffers. It's
4947    ///   outside the scope of the `BufferCollection` interface (at least for
4948    ///   now) to determine how many buffers may be held without closing, but
4949    ///   it'll typically be in the range 0-2.
4950    AttachLifetimeTracking {
4951        payload: BufferCollectionAttachLifetimeTrackingRequest,
4952        control_handle: BufferCollectionControlHandle,
4953    },
4954    /// An interaction was received which does not match any known method.
4955    #[non_exhaustive]
4956    _UnknownMethod {
4957        /// Ordinal of the method that was called.
4958        ordinal: u64,
4959        control_handle: BufferCollectionControlHandle,
4960        method_type: fidl::MethodType,
4961    },
4962}
4963
4964impl BufferCollectionRequest {
4965    #[allow(irrefutable_let_patterns)]
4966    pub fn into_sync(self) -> Option<(BufferCollectionSyncResponder)> {
4967        if let BufferCollectionRequest::Sync { responder } = self {
4968            Some((responder))
4969        } else {
4970            None
4971        }
4972    }
4973
4974    #[allow(irrefutable_let_patterns)]
4975    pub fn into_release(self) -> Option<(BufferCollectionControlHandle)> {
4976        if let BufferCollectionRequest::Release { control_handle } = self {
4977            Some((control_handle))
4978        } else {
4979            None
4980        }
4981    }
4982
4983    #[allow(irrefutable_let_patterns)]
4984    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionControlHandle)> {
4985        if let BufferCollectionRequest::SetName { payload, control_handle } = self {
4986            Some((payload, control_handle))
4987        } else {
4988            None
4989        }
4990    }
4991
4992    #[allow(irrefutable_let_patterns)]
4993    pub fn into_set_debug_client_info(
4994        self,
4995    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionControlHandle)> {
4996        if let BufferCollectionRequest::SetDebugClientInfo { payload, control_handle } = self {
4997            Some((payload, control_handle))
4998        } else {
4999            None
5000        }
5001    }
5002
5003    #[allow(irrefutable_let_patterns)]
5004    pub fn into_set_debug_timeout_log_deadline(
5005        self,
5006    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionControlHandle)> {
5007        if let BufferCollectionRequest::SetDebugTimeoutLogDeadline { payload, control_handle } =
5008            self
5009        {
5010            Some((payload, control_handle))
5011        } else {
5012            None
5013        }
5014    }
5015
5016    #[allow(irrefutable_let_patterns)]
5017    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionControlHandle)> {
5018        if let BufferCollectionRequest::SetVerboseLogging { control_handle } = self {
5019            Some((control_handle))
5020        } else {
5021            None
5022        }
5023    }
5024
5025    #[allow(irrefutable_let_patterns)]
5026    pub fn into_get_node_ref(self) -> Option<(BufferCollectionGetNodeRefResponder)> {
5027        if let BufferCollectionRequest::GetNodeRef { responder } = self {
5028            Some((responder))
5029        } else {
5030            None
5031        }
5032    }
5033
5034    #[allow(irrefutable_let_patterns)]
5035    pub fn into_is_alternate_for(
5036        self,
5037    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionIsAlternateForResponder)> {
5038        if let BufferCollectionRequest::IsAlternateFor { payload, responder } = self {
5039            Some((payload, responder))
5040        } else {
5041            None
5042        }
5043    }
5044
5045    #[allow(irrefutable_let_patterns)]
5046    pub fn into_get_buffer_collection_id(
5047        self,
5048    ) -> Option<(BufferCollectionGetBufferCollectionIdResponder)> {
5049        if let BufferCollectionRequest::GetBufferCollectionId { responder } = self {
5050            Some((responder))
5051        } else {
5052            None
5053        }
5054    }
5055
5056    #[allow(irrefutable_let_patterns)]
5057    pub fn into_set_weak(self) -> Option<(BufferCollectionControlHandle)> {
5058        if let BufferCollectionRequest::SetWeak { control_handle } = self {
5059            Some((control_handle))
5060        } else {
5061            None
5062        }
5063    }
5064
5065    #[allow(irrefutable_let_patterns)]
5066    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, BufferCollectionControlHandle)> {
5067        if let BufferCollectionRequest::SetWeakOk { payload, control_handle } = self {
5068            Some((payload, control_handle))
5069        } else {
5070            None
5071        }
5072    }
5073
5074    #[allow(irrefutable_let_patterns)]
5075    pub fn into_attach_node_tracking(
5076        self,
5077    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionControlHandle)> {
5078        if let BufferCollectionRequest::AttachNodeTracking { payload, control_handle } = self {
5079            Some((payload, control_handle))
5080        } else {
5081            None
5082        }
5083    }
5084
5085    #[allow(irrefutable_let_patterns)]
5086    pub fn into_set_constraints(
5087        self,
5088    ) -> Option<(BufferCollectionSetConstraintsRequest, BufferCollectionControlHandle)> {
5089        if let BufferCollectionRequest::SetConstraints { payload, control_handle } = self {
5090            Some((payload, control_handle))
5091        } else {
5092            None
5093        }
5094    }
5095
5096    #[allow(irrefutable_let_patterns)]
5097    pub fn into_wait_for_all_buffers_allocated(
5098        self,
5099    ) -> Option<(BufferCollectionWaitForAllBuffersAllocatedResponder)> {
5100        if let BufferCollectionRequest::WaitForAllBuffersAllocated { responder } = self {
5101            Some((responder))
5102        } else {
5103            None
5104        }
5105    }
5106
5107    #[allow(irrefutable_let_patterns)]
5108    pub fn into_check_all_buffers_allocated(
5109        self,
5110    ) -> Option<(BufferCollectionCheckAllBuffersAllocatedResponder)> {
5111        if let BufferCollectionRequest::CheckAllBuffersAllocated { responder } = self {
5112            Some((responder))
5113        } else {
5114            None
5115        }
5116    }
5117
5118    #[allow(irrefutable_let_patterns)]
5119    pub fn into_attach_token(
5120        self,
5121    ) -> Option<(BufferCollectionAttachTokenRequest, BufferCollectionControlHandle)> {
5122        if let BufferCollectionRequest::AttachToken { payload, control_handle } = self {
5123            Some((payload, control_handle))
5124        } else {
5125            None
5126        }
5127    }
5128
5129    #[allow(irrefutable_let_patterns)]
5130    pub fn into_attach_lifetime_tracking(
5131        self,
5132    ) -> Option<(BufferCollectionAttachLifetimeTrackingRequest, BufferCollectionControlHandle)>
5133    {
5134        if let BufferCollectionRequest::AttachLifetimeTracking { payload, control_handle } = self {
5135            Some((payload, control_handle))
5136        } else {
5137            None
5138        }
5139    }
5140
5141    /// Name of the method defined in FIDL
5142    pub fn method_name(&self) -> &'static str {
5143        match *self {
5144            BufferCollectionRequest::Sync { .. } => "sync",
5145            BufferCollectionRequest::Release { .. } => "release",
5146            BufferCollectionRequest::SetName { .. } => "set_name",
5147            BufferCollectionRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
5148            BufferCollectionRequest::SetDebugTimeoutLogDeadline { .. } => {
5149                "set_debug_timeout_log_deadline"
5150            }
5151            BufferCollectionRequest::SetVerboseLogging { .. } => "set_verbose_logging",
5152            BufferCollectionRequest::GetNodeRef { .. } => "get_node_ref",
5153            BufferCollectionRequest::IsAlternateFor { .. } => "is_alternate_for",
5154            BufferCollectionRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
5155            BufferCollectionRequest::SetWeak { .. } => "set_weak",
5156            BufferCollectionRequest::SetWeakOk { .. } => "set_weak_ok",
5157            BufferCollectionRequest::AttachNodeTracking { .. } => "attach_node_tracking",
5158            BufferCollectionRequest::SetConstraints { .. } => "set_constraints",
5159            BufferCollectionRequest::WaitForAllBuffersAllocated { .. } => {
5160                "wait_for_all_buffers_allocated"
5161            }
5162            BufferCollectionRequest::CheckAllBuffersAllocated { .. } => {
5163                "check_all_buffers_allocated"
5164            }
5165            BufferCollectionRequest::AttachToken { .. } => "attach_token",
5166            BufferCollectionRequest::AttachLifetimeTracking { .. } => "attach_lifetime_tracking",
5167            BufferCollectionRequest::_UnknownMethod {
5168                method_type: fidl::MethodType::OneWay,
5169                ..
5170            } => "unknown one-way method",
5171            BufferCollectionRequest::_UnknownMethod {
5172                method_type: fidl::MethodType::TwoWay,
5173                ..
5174            } => "unknown two-way method",
5175        }
5176    }
5177}
5178
/// Server-side control handle for a `BufferCollection` connection.
///
/// Cheap to clone: every clone shares the same serving state through the
/// `Arc`ed `ServeInner`.
#[derive(Debug, Clone)]
pub struct BufferCollectionControlHandle {
    // Shared serving state for this connection (channel access + shutdown).
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
5183
// All methods delegate to the shared `ServeInner` / its channel.
impl fidl::endpoints::ControlHandle for BufferCollectionControlHandle {
    // Shut down the connection's serving state.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Shut down, passing `status` along as the epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    // Reports whether the underlying channel is closed.
    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Signals future that resolves when the underlying channel closes.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Only meaningful on a real Zircon channel, hence fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        // Clears then sets the given signal bits on the peer endpoint.
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
5209
// Intentionally empty: no protocol-specific senders are defined here.
// NOTE(review): fidlgen appears to emit this impl block unconditionally;
// it would hold event senders if the protocol declared events — confirm.
impl BufferCollectionControlHandle {}
5211
/// Responder for the `BufferCollection.Sync` two-way method.
///
/// Dropping this without responding shuts down the channel (see the `Drop`
/// impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionSyncResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // exactly once while controlling whether shutdown happens.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5218
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5229
impl fidl::endpoints::Responder for BufferCollectionSyncResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle of the connection this responder belongs to.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Consume the responder without the channel shutdown a plain drop performs.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5244
5245impl BufferCollectionSyncResponder {
5246    /// Sends a response to the FIDL transaction.
5247    ///
5248    /// Sets the channel to shutdown if an error occurs.
5249    pub fn send(self) -> Result<(), fidl::Error> {
5250        let _result = self.send_raw();
5251        if _result.is_err() {
5252            self.control_handle.shutdown();
5253        }
5254        self.drop_without_shutdown();
5255        _result
5256    }
5257
5258    /// Similar to "send" but does not shutdown the channel if an error occurs.
5259    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
5260        let _result = self.send_raw();
5261        self.drop_without_shutdown();
5262        _result
5263    }
5264
5265    fn send_raw(&self) -> Result<(), fidl::Error> {
5266        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
5267            fidl::encoding::Flexible::new(()),
5268            self.tx_id,
5269            0x11ac2555cf575b54,
5270            fidl::encoding::DynamicFlags::FLEXIBLE,
5271        )
5272    }
5273}
5274
/// Responder for the `Node.GetNodeRef` two-way method on `BufferCollection`.
///
/// Dropping this without responding shuts down the channel (see the `Drop`
/// impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetNodeRefResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // exactly once while controlling whether shutdown happens.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5281
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5292
impl fidl::endpoints::Responder for BufferCollectionGetNodeRefResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle of the connection this responder belongs to.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Consume the responder without the channel shutdown a plain drop performs.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5307
5308impl BufferCollectionGetNodeRefResponder {
5309    /// Sends a response to the FIDL transaction.
5310    ///
5311    /// Sets the channel to shutdown if an error occurs.
5312    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5313        let _result = self.send_raw(payload);
5314        if _result.is_err() {
5315            self.control_handle.shutdown();
5316        }
5317        self.drop_without_shutdown();
5318        _result
5319    }
5320
5321    /// Similar to "send" but does not shutdown the channel if an error occurs.
5322    pub fn send_no_shutdown_on_err(
5323        self,
5324        mut payload: NodeGetNodeRefResponse,
5325    ) -> Result<(), fidl::Error> {
5326        let _result = self.send_raw(payload);
5327        self.drop_without_shutdown();
5328        _result
5329    }
5330
5331    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
5332        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
5333            fidl::encoding::Flexible::new(&mut payload),
5334            self.tx_id,
5335            0x5b3d0e51614df053,
5336            fidl::encoding::DynamicFlags::FLEXIBLE,
5337        )
5338    }
5339}
5340
/// Responder for the `Node.IsAlternateFor` two-way method on `BufferCollection`.
///
/// Dropping this without responding shuts down the channel (see the `Drop`
/// impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionIsAlternateForResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // exactly once while controlling whether shutdown happens.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5347
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5358
impl fidl::endpoints::Responder for BufferCollectionIsAlternateForResponder {
    type ControlHandle = BufferCollectionControlHandle;

    // Borrow the control handle of the connection this responder belongs to.
    fn control_handle(&self) -> &BufferCollectionControlHandle {
        &self.control_handle
    }

    // Consume the responder without the channel shutdown a plain drop performs.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5373
5374impl BufferCollectionIsAlternateForResponder {
5375    /// Sends a response to the FIDL transaction.
5376    ///
5377    /// Sets the channel to shutdown if an error occurs.
5378    pub fn send(
5379        self,
5380        mut result: Result<&NodeIsAlternateForResponse, Error>,
5381    ) -> Result<(), fidl::Error> {
5382        let _result = self.send_raw(result);
5383        if _result.is_err() {
5384            self.control_handle.shutdown();
5385        }
5386        self.drop_without_shutdown();
5387        _result
5388    }
5389
5390    /// Similar to "send" but does not shutdown the channel if an error occurs.
5391    pub fn send_no_shutdown_on_err(
5392        self,
5393        mut result: Result<&NodeIsAlternateForResponse, Error>,
5394    ) -> Result<(), fidl::Error> {
5395        let _result = self.send_raw(result);
5396        self.drop_without_shutdown();
5397        _result
5398    }
5399
5400    fn send_raw(
5401        &self,
5402        mut result: Result<&NodeIsAlternateForResponse, Error>,
5403    ) -> Result<(), fidl::Error> {
5404        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5405            NodeIsAlternateForResponse,
5406            Error,
5407        >>(
5408            fidl::encoding::FlexibleResult::new(result),
5409            self.tx_id,
5410            0x3a58e00157e0825,
5411            fidl::encoding::DynamicFlags::FLEXIBLE,
5412        )
5413    }
5414}
5415
/// Responder for the `Node.GetBufferCollectionId` two-way method on
/// `BufferCollection`.
///
/// Dropping this without responding shuts down the channel (see the `Drop`
/// impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionGetBufferCollectionIdResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // exactly once while controlling whether shutdown happens.
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
5422
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5433
impl fidl::endpoints::Responder for BufferCollectionGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionControlHandle;

    fn control_handle(&self) -> &BufferCollectionControlHandle {
        // ManuallyDrop derefs to the wrapped control handle.
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5448
5449impl BufferCollectionGetBufferCollectionIdResponder {
5450    /// Sends a response to the FIDL transaction.
5451    ///
5452    /// Sets the channel to shutdown if an error occurs.
5453    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5454        let _result = self.send_raw(payload);
5455        if _result.is_err() {
5456            self.control_handle.shutdown();
5457        }
5458        self.drop_without_shutdown();
5459        _result
5460    }
5461
5462    /// Similar to "send" but does not shutdown the channel if an error occurs.
5463    pub fn send_no_shutdown_on_err(
5464        self,
5465        mut payload: &NodeGetBufferCollectionIdResponse,
5466    ) -> Result<(), fidl::Error> {
5467        let _result = self.send_raw(payload);
5468        self.drop_without_shutdown();
5469        _result
5470    }
5471
5472    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
5473        self.control_handle
5474            .inner
5475            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
5476                fidl::encoding::Flexible::new(payload),
5477                self.tx_id,
5478                0x77d19a494b78ba8c,
5479                fidl::encoding::DynamicFlags::FLEXIBLE,
5480            )
5481    }
5482}
5483
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionWaitForAllBuffersAllocatedResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the handle
    // without running this type's `Drop` (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}
5490
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5492/// if the responder is dropped without sending a response, so that the client
5493/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionWaitForAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        // A dropped responder means the request would never get a reply; shut the
        // channel down so the client observes closure instead of hanging.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5501
impl fidl::endpoints::Responder for BufferCollectionWaitForAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    fn control_handle(&self) -> &BufferCollectionControlHandle {
        // ManuallyDrop derefs to the wrapped control handle.
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5516
5517impl BufferCollectionWaitForAllBuffersAllocatedResponder {
5518    /// Sends a response to the FIDL transaction.
5519    ///
5520    /// Sets the channel to shutdown if an error occurs.
5521    pub fn send(
5522        self,
5523        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5524    ) -> Result<(), fidl::Error> {
5525        let _result = self.send_raw(result);
5526        if _result.is_err() {
5527            self.control_handle.shutdown();
5528        }
5529        self.drop_without_shutdown();
5530        _result
5531    }
5532
5533    /// Similar to "send" but does not shutdown the channel if an error occurs.
5534    pub fn send_no_shutdown_on_err(
5535        self,
5536        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5537    ) -> Result<(), fidl::Error> {
5538        let _result = self.send_raw(result);
5539        self.drop_without_shutdown();
5540        _result
5541    }
5542
5543    fn send_raw(
5544        &self,
5545        mut result: Result<BufferCollectionWaitForAllBuffersAllocatedResponse, Error>,
5546    ) -> Result<(), fidl::Error> {
5547        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5548            BufferCollectionWaitForAllBuffersAllocatedResponse,
5549            Error,
5550        >>(
5551            fidl::encoding::FlexibleResult::new(result.as_mut().map_err(|e| *e)),
5552            self.tx_id,
5553            0x62300344b61404e,
5554            fidl::encoding::DynamicFlags::FLEXIBLE,
5555        )
5556    }
5557}
5558
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionCheckAllBuffersAllocatedResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the handle
    // without running this type's `Drop` (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionControlHandle>,
    // Transaction id of the request this responder replies to.
    tx_id: u32,
}
5565
/// Set the channel to be shutdown (see [`BufferCollectionControlHandle::shutdown`])
5567/// if the responder is dropped without sending a response, so that the client
5568/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionCheckAllBuffersAllocatedResponder {
    fn drop(&mut self) {
        // A dropped responder means the request would never get a reply; shut the
        // channel down so the client observes closure instead of hanging.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
5576
impl fidl::endpoints::Responder for BufferCollectionCheckAllBuffersAllocatedResponder {
    type ControlHandle = BufferCollectionControlHandle;

    fn control_handle(&self) -> &BufferCollectionControlHandle {
        // ManuallyDrop derefs to the wrapped control handle.
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
5591
5592impl BufferCollectionCheckAllBuffersAllocatedResponder {
5593    /// Sends a response to the FIDL transaction.
5594    ///
5595    /// Sets the channel to shutdown if an error occurs.
5596    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5597        let _result = self.send_raw(result);
5598        if _result.is_err() {
5599            self.control_handle.shutdown();
5600        }
5601        self.drop_without_shutdown();
5602        _result
5603    }
5604
5605    /// Similar to "send" but does not shutdown the channel if an error occurs.
5606    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5607        let _result = self.send_raw(result);
5608        self.drop_without_shutdown();
5609        _result
5610    }
5611
5612    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
5613        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
5614            fidl::encoding::EmptyStruct,
5615            Error,
5616        >>(
5617            fidl::encoding::FlexibleResult::new(result),
5618            self.tx_id,
5619            0x35a5fe77ce939c10,
5620            fidl::encoding::DynamicFlags::FLEXIBLE,
5621        )
5622    }
5623}
5624
/// Zero-sized marker type identifying the `fuchsia.sysmem2/BufferCollectionToken`
/// protocol for the various `fidl::endpoints` APIs.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenMarker;
5627
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenMarker {
    type Proxy = BufferCollectionTokenProxy;
    type RequestStream = BufferCollectionTokenRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenSynchronousProxy;

    // "(anonymous)" because this protocol is not published under a discoverable name.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionToken";
}
5636
/// Client-side interface for the `fuchsia.sysmem2/BufferCollectionToken`
/// protocol, implemented by both the async and synchronous proxies.
///
/// One-way (fire-and-forget) methods return `Result<(), fidl::Error>` directly;
/// two-way methods return an associated `*ResponseFut` future that resolves to
/// the decoded response.
pub trait BufferCollectionTokenProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    type DuplicateSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error>,
        > + Send;
    fn r#duplicate_sync(
        &self,
        payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut;
    fn r#duplicate(
        &self,
        payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_dispensable(&self) -> Result<(), fidl::Error>;
    fn r#create_buffer_collection_token_group(
        &self,
        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error>;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenSynchronousProxy {
    // Synchronous FIDL client bound to the BufferCollectionToken channel.
    client: fidl::client::sync::Client,
}
5691
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenSynchronousProxy {
    type Proxy = BufferCollectionTokenProxy;
    type Protocol = BufferCollectionTokenMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    // Borrows the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
5709
5710#[cfg(target_os = "fuchsia")]
5711impl BufferCollectionTokenSynchronousProxy {
5712    pub fn new(channel: fidl::Channel) -> Self {
5713        let protocol_name =
5714            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
5715        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
5716    }
5717
    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }
5721
5722    /// Waits until an event arrives and returns it. It is safe for other
5723    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
        // Blocks until an event message arrives (or `deadline`), then decodes it.
        BufferCollectionTokenEvent::decode(self.client.wait_for_event(deadline)?)
    }
5730
5731    /// Ensure that previous messages have been received server side. This is
5732    /// particularly useful after previous messages that created new tokens,
5733    /// because a token must be known to the sysmem server before sending the
5734    /// token to another participant.
5735    ///
5736    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
5737    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
5738    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
5739    /// to mitigate the possibility of a hostile/fake
5740    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
5741    /// Another way is to pass the token to
5742    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
5743    /// the token as part of exchanging it for a
5744    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
5745    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
5746    /// of stalling.
5747    ///
5748    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
5749    /// and then starting and completing a `Sync`, it's then safe to send the
5750    /// `BufferCollectionToken` client ends to other participants knowing the
5751    /// server will recognize the tokens when they're sent by the other
5752    /// participants to sysmem in a
5753    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
5754    /// efficient way to create tokens while avoiding unnecessary round trips.
5755    ///
5756    /// Other options include waiting for each
5757    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
5758    /// individually (using separate call to `Sync` after each), or calling
5759    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
5760    /// converted to a `BufferCollection` via
5761    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
5762    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
5763    /// the sync step and can create multiple tokens at once.
    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
        // Empty request, flexible empty response; blocks until the server
        // replies or `___deadline` passes. Ordinal is generated from the FIDL
        // method definition.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
        >(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("sync")?;
        Ok(_response)
    }
5777
5778    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
5779    ///
5780    /// Normally a participant will convert a `BufferCollectionToken` into a
5781    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
5782    /// `Release` via the token (and then close the channel immediately or
5783    /// shortly later in response to server closing the server end), which
5784    /// avoids causing buffer collection failure. Without a prior `Release`,
5785    /// closing the `BufferCollectionToken` client end will cause buffer
5786    /// collection failure.
5787    ///
5788    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
5789    ///
5790    /// By default the server handles unexpected closure of a
5791    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
5792    /// first) by failing the buffer collection. Partly this is to expedite
5793    /// closing VMO handles to reclaim memory when any participant fails. If a
5794    /// participant would like to cleanly close a `BufferCollection` without
5795    /// causing buffer collection failure, the participant can send `Release`
5796    /// before closing the `BufferCollection` client end. The `Release` can
5797    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
5798    /// buffer collection won't require constraints from this node in order to
5799    /// allocate. If after `SetConstraints`, the constraints are retained and
5800    /// aggregated, despite the lack of `BufferCollection` connection at the
5801    /// time of constraints aggregation.
5802    ///
5803    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
5804    ///
5805    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
5806    /// end (without `Release` first) will trigger failure of the buffer
5807    /// collection. To close a `BufferCollectionTokenGroup` channel without
5808    /// failing the buffer collection, ensure that AllChildrenPresent() has been
5809    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
5810    /// client end.
5811    ///
5812    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
5814    /// buffer collection will fail (triggered by reception of `Release` without
5815    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
5816    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
5817    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
5818    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
5819    /// close requires `AllChildrenPresent` (if not already sent), then
5820    /// `Release`, then close client end.
5821    ///
5822    /// If `Release` occurs after `AllChildrenPresent`, the children and all
5823    /// their constraints remain intact (just as they would if the
5824    /// `BufferCollectionTokenGroup` channel had remained open), and the client
5825    /// end close doesn't trigger buffer collection failure.
5826    ///
5827    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
5828    ///
5829    /// For brevity, the per-channel-protocol paragraphs above ignore the
5830    /// separate failure domain created by
5831    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
5832    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
5833    /// unexpectedly closes (without `Release` first) and that client end is
5834    /// under a failure domain, instead of failing the whole buffer collection,
5835    /// the failure domain is failed, but the buffer collection itself is
5836    /// isolated from failure of the failure domain. Such failure domains can be
5837    /// nested, in which case only the inner-most failure domain in which the
5838    /// `Node` resides fails.
    pub fn r#release(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) message: no transaction id, no reply.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5846
5847    /// Set a name for VMOs in this buffer collection.
5848    ///
5849    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
5850    /// will be truncated to fit. The name of the vmo will be suffixed with the
5851    /// buffer index within the collection (if the suffix fits within
5852    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
5853    /// listed in the inspect data.
5854    ///
5855    /// The name only affects VMOs allocated after the name is set; this call
5856    /// does not rename existing VMOs. If multiple clients set different names
5857    /// then the larger priority value will win. Setting a new name with the
5858    /// same priority as a prior name doesn't change the name.
5859    ///
5860    /// All table fields are currently required.
5861    ///
5862    /// + request `priority` The name is only set if this is the first `SetName`
5863    ///   or if `priority` is greater than any previous `priority` value in
5864    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
5865    /// + request `name` The name for VMOs created under this buffer collection.
    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) message: no transaction id, no reply.
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5873
5874    /// Set information about the current client that can be used by sysmem to
5875    /// help diagnose leaking memory and allocation stalls waiting for a
5876    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
5877    ///
5878    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
5880    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
5881    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
5882    ///
5883    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
5884    /// `Allocator` is the most efficient way to ensure that all
5885    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
5886    /// set, and is also more efficient than separately sending the same debug
5887    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
5888    /// created [`fuchsia.sysmem2/Node`].
5889    ///
5890    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
5891    /// indicate which client is closing their channel first, leading to subtree
5892    /// failure (which can be normal if the purpose of the subtree is over, but
5893    /// if happening earlier than expected, the client-channel-specific name can
5894    /// help diagnose where the failure is first coming from, from sysmem's
5895    /// point of view).
5896    ///
5897    /// All table fields are currently required.
5898    ///
5899    /// + request `name` This can be an arbitrary string, but the current
5900    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
5901    /// + request `id` This can be an arbitrary id, but the current process ID
5902    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    pub fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) message: no transaction id, no reply.
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5913
5914    /// Sysmem logs a warning if sysmem hasn't seen
5915    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
5916    /// within 5 seconds after creation of a new collection.
5917    ///
5918    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
5920    /// take effect.
5921    ///
5922    /// In most cases the default works well.
5923    ///
5924    /// All table fields are currently required.
5925    ///
5926    /// + request `deadline` The time at which sysmem will start trying to log
5927    ///   the warning, unless all constraints are with sysmem by then.
    pub fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) message: no transaction id, no reply.
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5938
5939    /// This enables verbose logging for the buffer collection.
5940    ///
5941    /// Verbose logging includes constraints set via
5942    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
5943    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
5944    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
5945    /// the tree of `Node`(s).
5946    ///
5947    /// Normally sysmem prints only a single line complaint when aggregation
5948    /// fails, with just the specific detailed reason that aggregation failed,
5949    /// with little surrounding context.  While this is often enough to diagnose
5950    /// a problem if only a small change was made and everything was working
5951    /// before the small change, it's often not particularly helpful for getting
5952    /// a new buffer collection to work for the first time.  Especially with
5953    /// more complex trees of nodes, involving things like
5954    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
5955    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
5956    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
5957    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
5958    /// looks like and why it's failing a logical allocation, or why a tree or
5959    /// subtree is failing sooner than expected.
5960    ///
5961    /// The intent of the extra logging is to be acceptable from a performance
5962    /// point of view, under the assumption that verbose logging is only enabled
5963    /// on a low number of buffer collections. If we're not tracking down a bug,
5964    /// we shouldn't send this message.
    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        // One-way (fire-and-forget) message: no transaction id, no reply.
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
5972
5973    /// This gets a handle that can be used as a parameter to
5974    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
5975    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
5976    /// client obtained this handle from this `Node`.
5977    ///
5978    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
5979    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
5980    /// despite the two calls typically being on different channels.
5981    ///
5982    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
5983    ///
5984    /// All table fields are currently required.
5985    ///
5986    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
5987    ///   different `Node` channel, to prove that the client obtained the handle
5988    ///   from this `Node`.
    pub fn r#get_node_ref(
        &self,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
        // Two-way call: blocks until the server replies or `___deadline` passes.
        let _response = self.client.send_query::<
            fidl::encoding::EmptyPayload,
            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
        >(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
        Ok(_response)
    }
6005
6006    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6007    /// rooted at a different child token of a common parent
6008    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6009    /// passed-in `node_ref`.
6010    ///
6011    /// This call is for assisting with admission control de-duplication, and
6012    /// with debugging.
6013    ///
6014    /// The `node_ref` must be obtained using
6015    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6016    ///
6017    /// The `node_ref` can be a duplicated handle; it's not necessary to call
6018    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6019    ///
6020    /// If a calling token may not actually be a valid token at all due to a
6021    /// potentially hostile/untrusted provider of the token, call
6022    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6023    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6024    /// never responds due to a calling token not being a real token (not really
6025    /// talking to sysmem).  Another option is to call
6026    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6027    /// which also validates the token along with converting it to a
6028    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6029    ///
6030    /// All table fields are currently required.
6031    ///
6032    /// - response `is_alternate`
6033    ///   - true: The first parent node in common between the calling node and
6034    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
6035    ///     that the calling `Node` and the `node_ref` `Node` will not have both
6036    ///     their constraints apply - rather sysmem will choose one or the other
6037    ///     of the constraints - never both.  This is because only one child of
6038    ///     a `BufferCollectionTokenGroup` is selected during logical
6039    ///     allocation, with only that one child's subtree contributing to
6040    ///     constraints aggregation.
6041    ///   - false: The first parent node in common between the calling `Node`
6042    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6043    ///     Currently, this means the first parent node in common is a
6044    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
6045    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
6046    ///     `Node` may have both their constraints apply during constraints
6047    ///     aggregation of the logical allocation, if both `Node`(s) are
6048    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6049    ///     this case, there is no `BufferCollectionTokenGroup` that will
6050    ///     directly prevent the two `Node`(s) from both being selected and
6051    ///     their constraints both aggregated, but even when false, one or both
6052    ///     `Node`(s) may still be eliminated from consideration if one or both
6053    ///     `Node`(s) has a direct or indirect parent
6054    ///     `BufferCollectionTokenGroup` which selects a child subtree other
6055    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
6056    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6057    ///   associated with the same buffer collection as the calling `Node`.
6058    ///   Another reason for this error is if the `node_ref` is an
6059    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6060    ///   a real `node_ref` obtained from `GetNodeRef`.
6061    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
6063    ///   the needed rights expected on a real `node_ref`.
6064    /// * No other failing status codes are returned by this call.  However,
6065    ///   sysmem may add additional codes in future, so the client should have
6066    ///   sensible default handling for any failing status code.
6067    pub fn r#is_alternate_for(
6068        &self,
6069        mut payload: NodeIsAlternateForRequest,
6070        ___deadline: zx::MonotonicInstant,
6071    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
6072        let _response = self.client.send_query::<
6073            NodeIsAlternateForRequest,
6074            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
6075        >(
6076            &mut payload,
6077            0x3a58e00157e0825,
6078            fidl::encoding::DynamicFlags::FLEXIBLE,
6079            ___deadline,
6080        )?
6081        .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
6082        Ok(_response.map(|x| x))
6083    }
6084
6085    /// Get the buffer collection ID. This ID is also available from
6086    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6087    /// within the collection).
6088    ///
6089    /// This call is mainly useful in situations where we can't convey a
6090    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6091    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6092    /// handle, which can be joined back up with a `BufferCollection` client end
6093    /// that was created via a different path. Prefer to convey a
6094    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6095    ///
6096    /// Trusting a `buffer_collection_id` value from a source other than sysmem
6097    /// is analogous to trusting a koid value from a source other than zircon.
6098    /// Both should be avoided unless really necessary, and both require
6099    /// caution. In some situations it may be reasonable to refer to a
6100    /// pre-established `BufferCollection` by `buffer_collection_id` via a
6101    /// protocol for efficiency reasons, but an incoming value purporting to be
6102    /// a `buffer_collection_id` is not sufficient alone to justify granting the
6103    /// sender of the `buffer_collection_id` any capability. The sender must
6104    /// first prove to a receiver that the sender has/had a VMO or has/had a
6105    /// `BufferCollectionToken` to the same collection by sending a handle that
6106    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6107    /// `buffer_collection_id` value. The receiver should take care to avoid
6108    /// assuming that a sender had a `BufferCollectionToken` in cases where the
6109    /// sender has only proven that the sender had a VMO.
6110    ///
6111    /// - response `buffer_collection_id` This ID is unique per buffer
6112    ///   collection per boot. Each buffer is uniquely identified by the
6113    ///   `buffer_collection_id` and `buffer_index` together.
6114    pub fn r#get_buffer_collection_id(
6115        &self,
6116        ___deadline: zx::MonotonicInstant,
6117    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
6118        let _response = self.client.send_query::<
6119            fidl::encoding::EmptyPayload,
6120            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
6121        >(
6122            (),
6123            0x77d19a494b78ba8c,
6124            fidl::encoding::DynamicFlags::FLEXIBLE,
6125            ___deadline,
6126        )?
6127        .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
6128        Ok(_response)
6129    }
6130
6131    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6132    /// created after this message to weak, which means that a client's `Node`
6133    /// client end (or a child created after this message) is not alone
6134    /// sufficient to keep allocated VMOs alive.
6135    ///
6136    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6137    /// `close_weak_asap`.
6138    ///
6139    /// This message is only permitted before the `Node` becomes ready for
6140    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6141    ///   * `BufferCollectionToken`: any time
6142    ///   * `BufferCollection`: before `SetConstraints`
6143    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6144    ///
6145    /// Currently, no conversion from strong `Node` to weak `Node` after ready
6146    /// for allocation is provided, but a client can simulate that by creating
6147    /// an additional `Node` before allocation and setting that additional
6148    /// `Node` to weak, and then potentially at some point later sending
6149    /// `Release` and closing the client end of the client's strong `Node`, but
6150    /// keeping the client's weak `Node`.
6151    ///
6152    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6153    /// collection failure (all `Node` client end(s) will see
6154    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6155    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6156    /// this situation until all `Node`(s) are ready for allocation. For initial
6157    /// allocation to succeed, at least one strong `Node` is required to exist
6158    /// at allocation time, but after that client receives VMO handles, that
6159    /// client can `BufferCollection.Release` and close the client end without
6160    /// causing this type of failure.
6161    ///
6162    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6163    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6164    /// separately as appropriate.
6165    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6166        self.client.send::<fidl::encoding::EmptyPayload>(
6167            (),
6168            0x22dd3ea514eeffe1,
6169            fidl::encoding::DynamicFlags::FLEXIBLE,
6170        )
6171    }
6172
6173    /// This indicates to sysmem that the client is prepared to pay attention to
6174    /// `close_weak_asap`.
6175    ///
6176    /// If sent, this message must be before
6177    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6178    ///
6179    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6180    /// send this message before `WaitForAllBuffersAllocated`, or a parent
6181    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6182    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6183    /// trigger buffer collection failure.
6184    ///
6185    /// This message is necessary because weak sysmem VMOs have not always been
6186    /// a thing, so older clients are not aware of the need to pay attention to
6187    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6188    /// sysmem weak VMO handles asap. By having this message and requiring
6189    /// participants to indicate their acceptance of this aspect of the overall
6190    /// protocol, we avoid situations where an older client is delivered a weak
6191    /// VMO without any way for sysmem to get that VMO to close quickly later
6192    /// (and on a per-buffer basis).
6193    ///
6194    /// A participant that doesn't handle `close_weak_asap` and also doesn't
6195    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6196    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6197    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6198    /// same participant has a child/delegate which does retrieve VMOs, that
6199    /// child/delegate will need to send `SetWeakOk` before
6200    /// `WaitForAllBuffersAllocated`.
6201    ///
6202    /// + request `for_child_nodes_also` If present and true, this means direct
6203    ///   child nodes of this node created after this message plus all
6204    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
6205    ///   those nodes. Any child node of this node that was created before this
6206    ///   message is not included. This setting is "sticky" in the sense that a
6207    ///   subsequent `SetWeakOk` without this bool set to true does not reset
6208    ///   the server-side bool. If this creates a problem for a participant, a
6209    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6210    ///   tokens instead, as appropriate. A participant should only set
6211    ///   `for_child_nodes_also` true if the participant can really promise to
6212    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
6213    ///   weak VMO handles held by participants holding the corresponding child
6214    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
6215    ///   which are using sysmem(1) can be weak, despite the clients of those
6216    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6217    ///   direct way to find out about `close_weak_asap`. This only applies to
6218    ///   descendents of this `Node` which are using sysmem(1), not to this
6219    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
6220    ///   token, which will fail allocation unless an ancestor of this `Node`
6221    ///   specified `for_child_nodes_also` true.
6222    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
6223        self.client.send::<NodeSetWeakOkRequest>(
6224            &mut payload,
6225            0x38a44fc4d7724be9,
6226            fidl::encoding::DynamicFlags::FLEXIBLE,
6227        )
6228    }
6229
6230    /// The server_end will be closed after this `Node` and any child nodes have
    /// released their buffer counts, making those counts available for
6232    /// reservation by a different `Node` via
6233    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
6234    ///
6235    /// The `Node` buffer counts may not be released until the entire tree of
6236    /// `Node`(s) is closed or failed, because
6237    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
6238    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
6239    /// `Node` buffer counts remain reserved until the orphaned node is later
6240    /// cleaned up.
6241    ///
6242    /// If the `Node` exceeds a fairly large number of attached eventpair server
6243    /// ends, a log message will indicate this and the `Node` (and the
6244    /// appropriate) sub-tree will fail.
6245    ///
6246    /// The `server_end` will remain open when
6247    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
6248    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
6249    /// [`fuchsia.sysmem2/BufferCollection`].
6250    ///
6251    /// This message can also be used with a
6252    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6253    pub fn r#attach_node_tracking(
6254        &self,
6255        mut payload: NodeAttachNodeTrackingRequest,
6256    ) -> Result<(), fidl::Error> {
6257        self.client.send::<NodeAttachNodeTrackingRequest>(
6258            &mut payload,
6259            0x3f22f2a293d3cdac,
6260            fidl::encoding::DynamicFlags::FLEXIBLE,
6261        )
6262    }
6263
6264    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
6265    /// one, referring to the same buffer collection.
6266    ///
6267    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6269    ///
6270    /// This method can be used to add more participants, by transferring the
6271    /// newly created tokens to additional participants.
6272    ///
6273    /// A new token will be returned for each entry in the
6274    /// `rights_attenuation_masks` array.
6275    ///
6276    /// If the called token may not actually be a valid token due to a
6277    /// potentially hostile/untrusted provider of the token, consider using
6278    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6279    /// instead of potentially getting stuck indefinitely if
6280    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
6281    /// due to the calling token not being a real token.
6282    ///
6283    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
6284    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
6285    /// method, because the sync step is included in this call, at the cost of a
6286    /// round trip during this call.
6287    ///
6288    /// All tokens must be turned in to sysmem via
6289    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6290    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6291    /// successfully allocate buffers (or to logically allocate buffers in the
6292    /// case of subtrees involving
6293    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
6294    ///
6295    /// All table fields are currently required.
6296    ///
6297    /// + request `rights_attenuation_mask` In each entry of
6298    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
6299    ///   in the buffer VMO rights obtainable via the corresponding returned
6300    ///   token. This allows an initiator or intermediary participant to
6301    ///   attenuate the rights available to a participant. This does not allow a
6302    ///   participant to gain rights that the participant doesn't already have.
6303    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
6304    ///   attenuation should be applied.
6305    /// - response `tokens` The client ends of each newly created token.
6306    pub fn r#duplicate_sync(
6307        &self,
6308        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
6309        ___deadline: zx::MonotonicInstant,
6310    ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
6311        let _response = self.client.send_query::<
6312            BufferCollectionTokenDuplicateSyncRequest,
6313            fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
6314        >(
6315            payload,
6316            0x1c1af9919d1ca45c,
6317            fidl::encoding::DynamicFlags::FLEXIBLE,
6318            ___deadline,
6319        )?
6320        .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
6321        Ok(_response)
6322    }
6323
6324    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
6325    /// one, referring to the same buffer collection.
6326    ///
6327    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
6329    ///
6330    /// This method can be used to add a participant, by transferring the newly
6331    /// created token to another participant.
6332    ///
6333    /// This one-way message can be used instead of the two-way
6334    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
6336    /// sysmem to respond to
6337    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
6338    /// client code isn't structured to make it easy to duplicate all the needed
6339    /// tokens at once.
6340    ///
6341    /// After sending one or more `Duplicate` messages, and before sending the
6342    /// newly created child tokens to other participants (or to other
6343    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
6344    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
6345    /// `Sync` call can be made on the token, or on the `BufferCollection`
6346    /// obtained by passing this token to `BindSharedCollection`.  Either will
6347    /// ensure that the server knows about the tokens created via `Duplicate`
6348    /// before the other participant sends the token to the server via separate
6349    /// `Allocator` channel.
6350    ///
6351    /// All tokens must be turned in via
6352    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
6353    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
6354    /// successfully allocate buffers.
6355    ///
6356    /// All table fields are currently required.
6357    ///
6358    /// + request `rights_attenuation_mask` The rights bits that are zero in
6359    ///   this mask will be absent in the buffer VMO rights obtainable via the
6360    ///   client end of `token_request`. This allows an initiator or
6361    ///   intermediary participant to attenuate the rights available to a
6362    ///   delegate participant. This does not allow a participant to gain rights
6363    ///   that the participant doesn't already have. The value
6364    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
6365    ///   should be applied.
6366    ///   + These values for rights_attenuation_mask result in no attenuation:
6367    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
6368    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
6369    ///       computed)
6370    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
6371    /// + request `token_request` is the server end of a `BufferCollectionToken`
6372    ///   channel. The client end of this channel acts as another participant in
6373    ///   the shared buffer collection.
6374    pub fn r#duplicate(
6375        &self,
6376        mut payload: BufferCollectionTokenDuplicateRequest,
6377    ) -> Result<(), fidl::Error> {
6378        self.client.send::<BufferCollectionTokenDuplicateRequest>(
6379            &mut payload,
6380            0x73e78f92ee7fb887,
6381            fidl::encoding::DynamicFlags::FLEXIBLE,
6382        )
6383    }
6384
6385    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
6386    ///
6387    /// When the `BufferCollectionToken` is converted to a
6388    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
6389    /// the `BufferCollection` also.
6390    ///
6391    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
6392    /// client end without having sent
6393    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
6395    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
6396    /// to the root `Node`, which fails the whole buffer collection. In
6397    /// contrast, a dispensable `Node` can fail after buffers are allocated
6398    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
6400    ///
6401    /// The dispensable `Node` participates in constraints aggregation along
6402    /// with its parent before buffer allocation. If the dispensable `Node`
6403    /// fails before buffers are allocated, the failure propagates to the
6404    /// dispensable `Node`'s parent.
6405    ///
6406    /// After buffers are allocated, failure of the dispensable `Node` (or any
6407    /// child of the dispensable `Node`) does not propagate to the dispensable
6408    /// `Node`'s parent. Failure does propagate from a normal child of a
6409    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
6410    /// blocked from reaching its parent if the child is attached using
6411    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
6412    /// dispensable and the failure occurred after allocation.
6413    ///
6414    /// A dispensable `Node` can be used in cases where a participant needs to
6415    /// provide constraints, but after buffers are allocated, the participant
6416    /// can fail without causing buffer collection failure from the parent
6417    /// `Node`'s point of view.
6418    ///
6419    /// In contrast, `BufferCollection.AttachToken` can be used to create a
6420    /// `BufferCollectionToken` which does not participate in constraints
6421    /// aggregation with its parent `Node`, and whose failure at any time does
6422    /// not propagate to its parent `Node`, and whose potential delay providing
6423    /// constraints does not prevent the parent `Node` from completing its
6424    /// buffer allocation.
6425    ///
6426    /// An initiator (creator of the root `Node` using
6427    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
6428    /// scenarios choose to initially use a dispensable `Node` for a first
6429    /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
6431    /// a `BufferCollectionToken` created with `AttachToken`.
6432    ///
6433    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
6434    /// shortly before sending the dispensable `BufferCollectionToken` to a
6435    /// delegate participant. Because `SetDispensable` prevents propagation of
6436    /// child `Node` failure to parent `Node`(s), if the client was relying on
6437    /// noticing child failure via failure of the parent `Node` retained by the
6438    /// client, the client may instead need to notice failure via other means.
6439    /// If other means aren't available/convenient, the client can instead
6440    /// retain the dispensable `Node` and create a child `Node` under that to
6441    /// send to the delegate participant, retaining this `Node` in order to
6442    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
6443    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
6444    /// (e.g. starting a new instance of the delegate participant and handing it
6445    /// a `BufferCollectionToken` created using
6446    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
6447    /// and clean up in a client-specific way).
6448    ///
6449    /// While it is possible (and potentially useful) to `SetDispensable` on a
6450    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
6451    /// to later replace a failed dispensable `Node` that was a direct child of
6452    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
6453    /// (since there's no `AttachToken` on a group). Instead, to enable
6454    /// `AttachToken` replacement in this case, create an additional
6455    /// non-dispensable token that's a direct child of the group and make the
6456    /// existing dispensable token a child of the additional token.  This way,
6457    /// the additional token that is a direct child of the group has
6458    /// `BufferCollection.AttachToken` which can be used to replace the failed
6459    /// dispensable token.
6460    ///
6461    /// `SetDispensable` on an already-dispensable token is idempotent.
6462    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
6463        self.client.send::<fidl::encoding::EmptyPayload>(
6464            (),
6465            0x228acf979254df8b,
6466            fidl::encoding::DynamicFlags::FLEXIBLE,
6467        )
6468    }
6469
6470    /// Create a logical OR among a set of tokens, called a
6471    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
6472    ///
6473    /// Most sysmem clients and many participants don't need to care about this
6474    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
6475    /// a participant wants to attempt to include one set of delegate
6476    /// participants, but if constraints don't combine successfully that way,
6477    /// fall back to a different (possibly overlapping) set of delegate
6478    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
6480    /// across all involved delegate participants). In such cases, a
6481    /// `BufferCollectionTokenGroup` is useful.
6482    ///
6483    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
6484    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
6485    /// which are not selected during aggregation will fail (close), which a
6486    /// potential participant should notice when their `BufferCollection`
6487    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
6488    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
6490    /// allocate a logical buffer collection or later async failure of a buffer
6491    /// collection).
6492    ///
6493    /// See comments on protocol `BufferCollectionTokenGroup`.
6494    ///
6495    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
6496    /// applied to the whole group can be achieved with a
6497    /// `BufferCollectionToken` for this purpose as a direct parent of the
6498    /// `BufferCollectionTokenGroup`.
6499    ///
6500    /// All table fields are currently required.
6501    ///
6502    /// + request `group_request` The server end of a
6503    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
6504    pub fn r#create_buffer_collection_token_group(
6505        &self,
6506        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
6507    ) -> Result<(), fidl::Error> {
6508        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
6509            &mut payload,
6510            0x30f8d48e77bd36f2,
6511            fidl::encoding::DynamicFlags::FLEXIBLE,
6512        )
6513    }
6514}
6515
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenSynchronousProxy> for zx::Handle {
    /// Consumes the proxy and yields its underlying channel as a generic handle.
    fn from(value: BufferCollectionTokenSynchronousProxy) -> Self {
        let channel = value.into_channel();
        Self::from(channel)
    }
}
6522
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy for this protocol.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenSynchronousProxy::new(value)
    }
}
6529
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollectionToken`
/// protocol. `Clone` is derived, so clones share the same underlying client.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
6534
// Standard `Proxy` plumbing: conversions between the proxy and its async
// channel, as required by the generic fidl endpoint machinery.
impl fidl::endpoints::Proxy for BufferCollectionTokenProxy {
    type Protocol = BufferCollectionTokenMarker;

    // Wrap an async channel in a new proxy for this protocol.
    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Attempt to recover the underlying channel; on failure the proxy is
    // reconstructed from the client and handed back to the caller intact.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    // Borrow the underlying channel without consuming the proxy.
    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
6550
6551impl BufferCollectionTokenProxy {
6552    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionToken.
6553    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
6554        let protocol_name =
6555            <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
6556        Self { client: fidl::client::Client::new(channel, protocol_name) }
6557    }
6558
6559    /// Get a Stream of events from the remote end of the protocol.
6560    ///
6561    /// # Panics
6562    ///
6563    /// Panics if the event stream was already taken.
6564    pub fn take_event_stream(&self) -> BufferCollectionTokenEventStream {
6565        BufferCollectionTokenEventStream { event_receiver: self.client.take_event_receiver() }
6566    }
6567
6568    /// Ensure that previous messages have been received server side. This is
6569    /// particularly useful after previous messages that created new tokens,
6570    /// because a token must be known to the sysmem server before sending the
6571    /// token to another participant.
6572    ///
6573    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
6574    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
6575    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
6576    /// to mitigate the possibility of a hostile/fake
6577    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
6578    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
6580    /// the token as part of exchanging it for a
6581    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
6582    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
6583    /// of stalling.
6584    ///
6585    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
6586    /// and then starting and completing a `Sync`, it's then safe to send the
6587    /// `BufferCollectionToken` client ends to other participants knowing the
6588    /// server will recognize the tokens when they're sent by the other
6589    /// participants to sysmem in a
6590    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
6591    /// efficient way to create tokens while avoiding unnecessary round trips.
6592    ///
6593    /// Other options include waiting for each
6594    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
6595    /// individually (using separate call to `Sync` after each), or calling
6596    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
6597    /// converted to a `BufferCollection` via
6598    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
6599    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
6600    /// the sync step and can create multiple tokens at once.
6601    pub fn r#sync(
6602        &self,
6603    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
6604        BufferCollectionTokenProxyInterface::r#sync(self)
6605    }
6606
6607    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
6608    ///
6609    /// Normally a participant will convert a `BufferCollectionToken` into a
6610    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
6611    /// `Release` via the token (and then close the channel immediately or
6612    /// shortly later in response to server closing the server end), which
6613    /// avoids causing buffer collection failure. Without a prior `Release`,
6614    /// closing the `BufferCollectionToken` client end will cause buffer
6615    /// collection failure.
6616    ///
6617    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
6618    ///
6619    /// By default the server handles unexpected closure of a
6620    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
6621    /// first) by failing the buffer collection. Partly this is to expedite
6622    /// closing VMO handles to reclaim memory when any participant fails. If a
6623    /// participant would like to cleanly close a `BufferCollection` without
6624    /// causing buffer collection failure, the participant can send `Release`
6625    /// before closing the `BufferCollection` client end. The `Release` can
6626    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
6627    /// buffer collection won't require constraints from this node in order to
6628    /// allocate. If after `SetConstraints`, the constraints are retained and
6629    /// aggregated, despite the lack of `BufferCollection` connection at the
6630    /// time of constraints aggregation.
6631    ///
6632    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
6633    ///
6634    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
6635    /// end (without `Release` first) will trigger failure of the buffer
6636    /// collection. To close a `BufferCollectionTokenGroup` channel without
6637    /// failing the buffer collection, ensure that AllChildrenPresent() has been
6638    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
6639    /// client end.
6640    ///
6641    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
6643    /// buffer collection will fail (triggered by reception of `Release` without
6644    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
6645    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
6646    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
6647    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
6648    /// close requires `AllChildrenPresent` (if not already sent), then
6649    /// `Release`, then close client end.
6650    ///
6651    /// If `Release` occurs after `AllChildrenPresent`, the children and all
6652    /// their constraints remain intact (just as they would if the
6653    /// `BufferCollectionTokenGroup` channel had remained open), and the client
6654    /// end close doesn't trigger buffer collection failure.
6655    ///
6656    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
6657    ///
6658    /// For brevity, the per-channel-protocol paragraphs above ignore the
6659    /// separate failure domain created by
6660    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
6661    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
6662    /// unexpectedly closes (without `Release` first) and that client end is
6663    /// under a failure domain, instead of failing the whole buffer collection,
6664    /// the failure domain is failed, but the buffer collection itself is
6665    /// isolated from failure of the failure domain. Such failure domains can be
6666    /// nested, in which case only the inner-most failure domain in which the
6667    /// `Node` resides fails.
6668    pub fn r#release(&self) -> Result<(), fidl::Error> {
6669        BufferCollectionTokenProxyInterface::r#release(self)
6670    }
6671
6672    /// Set a name for VMOs in this buffer collection.
6673    ///
6674    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
6675    /// will be truncated to fit. The name of the vmo will be suffixed with the
6676    /// buffer index within the collection (if the suffix fits within
6677    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
6678    /// listed in the inspect data.
6679    ///
6680    /// The name only affects VMOs allocated after the name is set; this call
6681    /// does not rename existing VMOs. If multiple clients set different names
6682    /// then the larger priority value will win. Setting a new name with the
6683    /// same priority as a prior name doesn't change the name.
6684    ///
6685    /// All table fields are currently required.
6686    ///
6687    /// + request `priority` The name is only set if this is the first `SetName`
6688    ///   or if `priority` is greater than any previous `priority` value in
6689    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
6690    /// + request `name` The name for VMOs created under this buffer collection.
6691    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
6692        BufferCollectionTokenProxyInterface::r#set_name(self, payload)
6693    }
6694
6695    /// Set information about the current client that can be used by sysmem to
6696    /// help diagnose leaking memory and allocation stalls waiting for a
6697    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
6698    ///
6699    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
6701    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
6702    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
6703    ///
6704    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
6705    /// `Allocator` is the most efficient way to ensure that all
6706    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
6707    /// set, and is also more efficient than separately sending the same debug
6708    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
6709    /// created [`fuchsia.sysmem2/Node`].
6710    ///
6711    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
6712    /// indicate which client is closing their channel first, leading to subtree
6713    /// failure (which can be normal if the purpose of the subtree is over, but
6714    /// if happening earlier than expected, the client-channel-specific name can
6715    /// help diagnose where the failure is first coming from, from sysmem's
6716    /// point of view).
6717    ///
6718    /// All table fields are currently required.
6719    ///
6720    /// + request `name` This can be an arbitrary string, but the current
6721    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
6722    /// + request `id` This can be an arbitrary id, but the current process ID
6723    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
6724    pub fn r#set_debug_client_info(
6725        &self,
6726        mut payload: &NodeSetDebugClientInfoRequest,
6727    ) -> Result<(), fidl::Error> {
6728        BufferCollectionTokenProxyInterface::r#set_debug_client_info(self, payload)
6729    }
6730
6731    /// Sysmem logs a warning if sysmem hasn't seen
6732    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
6733    /// within 5 seconds after creation of a new collection.
6734    ///
6735    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
6737    /// take effect.
6738    ///
6739    /// In most cases the default works well.
6740    ///
6741    /// All table fields are currently required.
6742    ///
6743    /// + request `deadline` The time at which sysmem will start trying to log
6744    ///   the warning, unless all constraints are with sysmem by then.
6745    pub fn r#set_debug_timeout_log_deadline(
6746        &self,
6747        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
6748    ) -> Result<(), fidl::Error> {
6749        BufferCollectionTokenProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
6750    }
6751
6752    /// This enables verbose logging for the buffer collection.
6753    ///
6754    /// Verbose logging includes constraints set via
6755    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
6756    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
6757    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
6758    /// the tree of `Node`(s).
6759    ///
6760    /// Normally sysmem prints only a single line complaint when aggregation
6761    /// fails, with just the specific detailed reason that aggregation failed,
6762    /// with little surrounding context.  While this is often enough to diagnose
6763    /// a problem if only a small change was made and everything was working
6764    /// before the small change, it's often not particularly helpful for getting
6765    /// a new buffer collection to work for the first time.  Especially with
6766    /// more complex trees of nodes, involving things like
6767    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
6768    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
6769    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
6770    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
6771    /// looks like and why it's failing a logical allocation, or why a tree or
6772    /// subtree is failing sooner than expected.
6773    ///
6774    /// The intent of the extra logging is to be acceptable from a performance
6775    /// point of view, under the assumption that verbose logging is only enabled
6776    /// on a low number of buffer collections. If we're not tracking down a bug,
6777    /// we shouldn't send this message.
6778    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
6779        BufferCollectionTokenProxyInterface::r#set_verbose_logging(self)
6780    }
6781
6782    /// This gets a handle that can be used as a parameter to
6783    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
6784    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
6785    /// client obtained this handle from this `Node`.
6786    ///
6787    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
6788    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
6789    /// despite the two calls typically being on different channels.
6790    ///
6791    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
6792    ///
6793    /// All table fields are currently required.
6794    ///
6795    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
6796    ///   different `Node` channel, to prove that the client obtained the handle
6797    ///   from this `Node`.
6798    pub fn r#get_node_ref(
6799        &self,
6800    ) -> fidl::client::QueryResponseFut<
6801        NodeGetNodeRefResponse,
6802        fidl::encoding::DefaultFuchsiaResourceDialect,
6803    > {
6804        BufferCollectionTokenProxyInterface::r#get_node_ref(self)
6805    }
6806
6807    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
6808    /// rooted at a different child token of a common parent
6809    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
6810    /// passed-in `node_ref`.
6811    ///
6812    /// This call is for assisting with admission control de-duplication, and
6813    /// with debugging.
6814    ///
6815    /// The `node_ref` must be obtained using
6816    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
6817    ///
6818    /// The `node_ref` can be a duplicated handle; it's not necessary to call
6819    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
6820    ///
6821    /// If a calling token may not actually be a valid token at all due to a
6822    /// potentially hostile/untrusted provider of the token, call
6823    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
6824    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
6825    /// never responds due to a calling token not being a real token (not really
6826    /// talking to sysmem).  Another option is to call
6827    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
6828    /// which also validates the token along with converting it to a
6829    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
6830    ///
6831    /// All table fields are currently required.
6832    ///
6833    /// - response `is_alternate`
6834    ///   - true: The first parent node in common between the calling node and
6835    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
6836    ///     that the calling `Node` and the `node_ref` `Node` will not have both
6837    ///     their constraints apply - rather sysmem will choose one or the other
6838    ///     of the constraints - never both.  This is because only one child of
6839    ///     a `BufferCollectionTokenGroup` is selected during logical
6840    ///     allocation, with only that one child's subtree contributing to
6841    ///     constraints aggregation.
6842    ///   - false: The first parent node in common between the calling `Node`
6843    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
6844    ///     Currently, this means the first parent node in common is a
6845    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
6846    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
6847    ///     `Node` may have both their constraints apply during constraints
6848    ///     aggregation of the logical allocation, if both `Node`(s) are
6849    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
6850    ///     this case, there is no `BufferCollectionTokenGroup` that will
6851    ///     directly prevent the two `Node`(s) from both being selected and
6852    ///     their constraints both aggregated, but even when false, one or both
6853    ///     `Node`(s) may still be eliminated from consideration if one or both
6854    ///     `Node`(s) has a direct or indirect parent
6855    ///     `BufferCollectionTokenGroup` which selects a child subtree other
6856    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
6857    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
6858    ///   associated with the same buffer collection as the calling `Node`.
6859    ///   Another reason for this error is if the `node_ref` is an
6860    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
6861    ///   a real `node_ref` obtained from `GetNodeRef`.
6862    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
6864    ///   the needed rights expected on a real `node_ref`.
6865    /// * No other failing status codes are returned by this call.  However,
6866    ///   sysmem may add additional codes in future, so the client should have
6867    ///   sensible default handling for any failing status code.
6868    pub fn r#is_alternate_for(
6869        &self,
6870        mut payload: NodeIsAlternateForRequest,
6871    ) -> fidl::client::QueryResponseFut<
6872        NodeIsAlternateForResult,
6873        fidl::encoding::DefaultFuchsiaResourceDialect,
6874    > {
6875        BufferCollectionTokenProxyInterface::r#is_alternate_for(self, payload)
6876    }
6877
6878    /// Get the buffer collection ID. This ID is also available from
6879    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
6880    /// within the collection).
6881    ///
6882    /// This call is mainly useful in situations where we can't convey a
6883    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
6884    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
6885    /// handle, which can be joined back up with a `BufferCollection` client end
6886    /// that was created via a different path. Prefer to convey a
6887    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
6888    ///
6889    /// Trusting a `buffer_collection_id` value from a source other than sysmem
6890    /// is analogous to trusting a koid value from a source other than zircon.
6891    /// Both should be avoided unless really necessary, and both require
6892    /// caution. In some situations it may be reasonable to refer to a
6893    /// pre-established `BufferCollection` by `buffer_collection_id` via a
6894    /// protocol for efficiency reasons, but an incoming value purporting to be
6895    /// a `buffer_collection_id` is not sufficient alone to justify granting the
6896    /// sender of the `buffer_collection_id` any capability. The sender must
6897    /// first prove to a receiver that the sender has/had a VMO or has/had a
6898    /// `BufferCollectionToken` to the same collection by sending a handle that
6899    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
6900    /// `buffer_collection_id` value. The receiver should take care to avoid
6901    /// assuming that a sender had a `BufferCollectionToken` in cases where the
6902    /// sender has only proven that the sender had a VMO.
6903    ///
6904    /// - response `buffer_collection_id` This ID is unique per buffer
6905    ///   collection per boot. Each buffer is uniquely identified by the
6906    ///   `buffer_collection_id` and `buffer_index` together.
6907    pub fn r#get_buffer_collection_id(
6908        &self,
6909    ) -> fidl::client::QueryResponseFut<
6910        NodeGetBufferCollectionIdResponse,
6911        fidl::encoding::DefaultFuchsiaResourceDialect,
6912    > {
6913        BufferCollectionTokenProxyInterface::r#get_buffer_collection_id(self)
6914    }
6915
6916    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
6917    /// created after this message to weak, which means that a client's `Node`
6918    /// client end (or a child created after this message) is not alone
6919    /// sufficient to keep allocated VMOs alive.
6920    ///
6921    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
6922    /// `close_weak_asap`.
6923    ///
6924    /// This message is only permitted before the `Node` becomes ready for
6925    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
6926    ///   * `BufferCollectionToken`: any time
6927    ///   * `BufferCollection`: before `SetConstraints`
6928    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
6929    ///
6930    /// Currently, no conversion from strong `Node` to weak `Node` after ready
6931    /// for allocation is provided, but a client can simulate that by creating
6932    /// an additional `Node` before allocation and setting that additional
6933    /// `Node` to weak, and then potentially at some point later sending
6934    /// `Release` and closing the client end of the client's strong `Node`, but
6935    /// keeping the client's weak `Node`.
6936    ///
6937    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
6938    /// collection failure (all `Node` client end(s) will see
6939    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
6940    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
6941    /// this situation until all `Node`(s) are ready for allocation. For initial
6942    /// allocation to succeed, at least one strong `Node` is required to exist
6943    /// at allocation time, but after that client receives VMO handles, that
6944    /// client can `BufferCollection.Release` and close the client end without
6945    /// causing this type of failure.
6946    ///
6947    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
6948    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
6949    /// separately as appropriate.
6950    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
6951        BufferCollectionTokenProxyInterface::r#set_weak(self)
6952    }
6953
6954    /// This indicates to sysmem that the client is prepared to pay attention to
6955    /// `close_weak_asap`.
6956    ///
6957    /// If sent, this message must be before
6958    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
6959    ///
6960    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
6961    /// send this message before `WaitForAllBuffersAllocated`, or a parent
6962    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
6963    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
6964    /// trigger buffer collection failure.
6965    ///
6966    /// This message is necessary because weak sysmem VMOs have not always been
6967    /// a thing, so older clients are not aware of the need to pay attention to
6968    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
6969    /// sysmem weak VMO handles asap. By having this message and requiring
6970    /// participants to indicate their acceptance of this aspect of the overall
6971    /// protocol, we avoid situations where an older client is delivered a weak
6972    /// VMO without any way for sysmem to get that VMO to close quickly later
6973    /// (and on a per-buffer basis).
6974    ///
6975    /// A participant that doesn't handle `close_weak_asap` and also doesn't
6976    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
6977    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
6978    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
6979    /// same participant has a child/delegate which does retrieve VMOs, that
6980    /// child/delegate will need to send `SetWeakOk` before
6981    /// `WaitForAllBuffersAllocated`.
6982    ///
6983    /// + request `for_child_nodes_also` If present and true, this means direct
6984    ///   child nodes of this node created after this message plus all
6985    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
6986    ///   those nodes. Any child node of this node that was created before this
6987    ///   message is not included. This setting is "sticky" in the sense that a
6988    ///   subsequent `SetWeakOk` without this bool set to true does not reset
6989    ///   the server-side bool. If this creates a problem for a participant, a
6990    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
6991    ///   tokens instead, as appropriate. A participant should only set
6992    ///   `for_child_nodes_also` true if the participant can really promise to
6993    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
6994    ///   weak VMO handles held by participants holding the corresponding child
6995    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
6996    ///   which are using sysmem(1) can be weak, despite the clients of those
6997    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
6998    ///   direct way to find out about `close_weak_asap`. This only applies to
6999    ///   descendents of this `Node` which are using sysmem(1), not to this
7000    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
7001    ///   token, which will fail allocation unless an ancestor of this `Node`
7002    ///   specified `for_child_nodes_also` true.
7003    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
7004        BufferCollectionTokenProxyInterface::r#set_weak_ok(self, payload)
7005    }
7006
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
7009    /// reservation by a different `Node` via
7010    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
7011    ///
7012    /// The `Node` buffer counts may not be released until the entire tree of
7013    /// `Node`(s) is closed or failed, because
7014    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
7015    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
7016    /// `Node` buffer counts remain reserved until the orphaned node is later
7017    /// cleaned up.
7018    ///
7019    /// If the `Node` exceeds a fairly large number of attached eventpair server
7020    /// ends, a log message will indicate this and the `Node` (and the
7021    /// appropriate) sub-tree will fail.
7022    ///
7023    /// The `server_end` will remain open when
7024    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
7025    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
7026    /// [`fuchsia.sysmem2/BufferCollection`].
7027    ///
7028    /// This message can also be used with a
7029    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
7030    pub fn r#attach_node_tracking(
7031        &self,
7032        mut payload: NodeAttachNodeTrackingRequest,
7033    ) -> Result<(), fidl::Error> {
7034        BufferCollectionTokenProxyInterface::r#attach_node_tracking(self, payload)
7035    }
7036
7037    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
7038    /// one, referring to the same buffer collection.
7039    ///
7040    /// The created tokens are children of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
7042    ///
7043    /// This method can be used to add more participants, by transferring the
7044    /// newly created tokens to additional participants.
7045    ///
7046    /// A new token will be returned for each entry in the
7047    /// `rights_attenuation_masks` array.
7048    ///
7049    /// If the called token may not actually be a valid token due to a
7050    /// potentially hostile/untrusted provider of the token, consider using
7051    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
7052    /// instead of potentially getting stuck indefinitely if
7053    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
7054    /// due to the calling token not being a real token.
7055    ///
7056    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
7057    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
7058    /// method, because the sync step is included in this call, at the cost of a
7059    /// round trip during this call.
7060    ///
7061    /// All tokens must be turned in to sysmem via
7062    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
7063    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
7064    /// successfully allocate buffers (or to logically allocate buffers in the
7065    /// case of subtrees involving
7066    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
7067    ///
7068    /// All table fields are currently required.
7069    ///
7070    /// + request `rights_attenuation_mask` In each entry of
7071    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
7072    ///   in the buffer VMO rights obtainable via the corresponding returned
7073    ///   token. This allows an initiator or intermediary participant to
7074    ///   attenuate the rights available to a participant. This does not allow a
7075    ///   participant to gain rights that the participant doesn't already have.
7076    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
7077    ///   attenuation should be applied.
7078    /// - response `tokens` The client ends of each newly created token.
7079    pub fn r#duplicate_sync(
7080        &self,
7081        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
7082    ) -> fidl::client::QueryResponseFut<
7083        BufferCollectionTokenDuplicateSyncResponse,
7084        fidl::encoding::DefaultFuchsiaResourceDialect,
7085    > {
7086        BufferCollectionTokenProxyInterface::r#duplicate_sync(self, payload)
7087    }
7088
7089    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
7090    /// one, referring to the same buffer collection.
7091    ///
7092    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
7094    ///
7095    /// This method can be used to add a participant, by transferring the newly
7096    /// created token to another participant.
7097    ///
7098    /// This one-way message can be used instead of the two-way
7099    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
7101    /// sysmem to respond to
7102    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
7103    /// client code isn't structured to make it easy to duplicate all the needed
7104    /// tokens at once.
7105    ///
7106    /// After sending one or more `Duplicate` messages, and before sending the
7107    /// newly created child tokens to other participants (or to other
7108    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
7109    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
7110    /// `Sync` call can be made on the token, or on the `BufferCollection`
7111    /// obtained by passing this token to `BindSharedCollection`.  Either will
7112    /// ensure that the server knows about the tokens created via `Duplicate`
7113    /// before the other participant sends the token to the server via separate
7114    /// `Allocator` channel.
7115    ///
7116    /// All tokens must be turned in via
7117    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
7118    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
7119    /// successfully allocate buffers.
7120    ///
7121    /// All table fields are currently required.
7122    ///
7123    /// + request `rights_attenuation_mask` The rights bits that are zero in
7124    ///   this mask will be absent in the buffer VMO rights obtainable via the
7125    ///   client end of `token_request`. This allows an initiator or
7126    ///   intermediary participant to attenuate the rights available to a
7127    ///   delegate participant. This does not allow a participant to gain rights
7128    ///   that the participant doesn't already have. The value
7129    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
7130    ///   should be applied.
7131    ///   + These values for rights_attenuation_mask result in no attenuation:
7132    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
7133    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
7134    ///       computed)
7135    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
7136    /// + request `token_request` is the server end of a `BufferCollectionToken`
7137    ///   channel. The client end of this channel acts as another participant in
7138    ///   the shared buffer collection.
7139    pub fn r#duplicate(
7140        &self,
7141        mut payload: BufferCollectionTokenDuplicateRequest,
7142    ) -> Result<(), fidl::Error> {
7143        BufferCollectionTokenProxyInterface::r#duplicate(self, payload)
7144    }
7145
7146    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
7147    ///
7148    /// When the `BufferCollectionToken` is converted to a
7149    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
7150    /// the `BufferCollection` also.
7151    ///
7152    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
7153    /// client end without having sent
7154    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
7156    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
7157    /// to the root `Node`, which fails the whole buffer collection. In
7158    /// contrast, a dispensable `Node` can fail after buffers are allocated
7159    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
7161    ///
7162    /// The dispensable `Node` participates in constraints aggregation along
7163    /// with its parent before buffer allocation. If the dispensable `Node`
7164    /// fails before buffers are allocated, the failure propagates to the
7165    /// dispensable `Node`'s parent.
7166    ///
7167    /// After buffers are allocated, failure of the dispensable `Node` (or any
7168    /// child of the dispensable `Node`) does not propagate to the dispensable
7169    /// `Node`'s parent. Failure does propagate from a normal child of a
7170    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
7171    /// blocked from reaching its parent if the child is attached using
7172    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
7173    /// dispensable and the failure occurred after allocation.
7174    ///
7175    /// A dispensable `Node` can be used in cases where a participant needs to
7176    /// provide constraints, but after buffers are allocated, the participant
7177    /// can fail without causing buffer collection failure from the parent
7178    /// `Node`'s point of view.
7179    ///
7180    /// In contrast, `BufferCollection.AttachToken` can be used to create a
7181    /// `BufferCollectionToken` which does not participate in constraints
7182    /// aggregation with its parent `Node`, and whose failure at any time does
7183    /// not propagate to its parent `Node`, and whose potential delay providing
7184    /// constraints does not prevent the parent `Node` from completing its
7185    /// buffer allocation.
7186    ///
7187    /// An initiator (creator of the root `Node` using
7188    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
7189    /// scenarios choose to initially use a dispensable `Node` for a first
7190    /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
7192    /// a `BufferCollectionToken` created with `AttachToken`.
7193    ///
7194    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
7195    /// shortly before sending the dispensable `BufferCollectionToken` to a
7196    /// delegate participant. Because `SetDispensable` prevents propagation of
7197    /// child `Node` failure to parent `Node`(s), if the client was relying on
7198    /// noticing child failure via failure of the parent `Node` retained by the
7199    /// client, the client may instead need to notice failure via other means.
7200    /// If other means aren't available/convenient, the client can instead
7201    /// retain the dispensable `Node` and create a child `Node` under that to
7202    /// send to the delegate participant, retaining this `Node` in order to
7203    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
7204    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
7205    /// (e.g. starting a new instance of the delegate participant and handing it
7206    /// a `BufferCollectionToken` created using
7207    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
7208    /// and clean up in a client-specific way).
7209    ///
7210    /// While it is possible (and potentially useful) to `SetDispensable` on a
7211    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
7212    /// to later replace a failed dispensable `Node` that was a direct child of
7213    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
7214    /// (since there's no `AttachToken` on a group). Instead, to enable
7215    /// `AttachToken` replacement in this case, create an additional
7216    /// non-dispensable token that's a direct child of the group and make the
7217    /// existing dispensable token a child of the additional token.  This way,
7218    /// the additional token that is a direct child of the group has
7219    /// `BufferCollection.AttachToken` which can be used to replace the failed
7220    /// dispensable token.
7221    ///
7222    /// `SetDispensable` on an already-dispensable token is idempotent.
7223    pub fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
7224        BufferCollectionTokenProxyInterface::r#set_dispensable(self)
7225    }
7226
    /// Create a logical OR among a set of tokens, called a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    ///
    /// Most sysmem clients and many participants don't need to care about this
    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
    /// a participant wants to attempt to include one set of delegate
    /// participants, but if constraints don't combine successfully that way,
    /// fall back to a different (possibly overlapping) set of delegate
    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
    /// across all involved delegate participants). In such cases, a
    /// `BufferCollectionTokenGroup` is useful.
    ///
    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
    /// which are not selected during aggregation will fail (close), which a
    /// potential participant should notice when their `BufferCollection`
    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
    /// allocate a logical buffer collection or later async failure of a buffer
    /// collection).
    ///
    /// See comments on protocol `BufferCollectionTokenGroup`.
    ///
    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
    /// applied to the whole group can be achieved with a
    /// `BufferCollectionToken` for this purpose as a direct parent of the
    /// `BufferCollectionTokenGroup`.
    ///
    /// All table fields are currently required.
    ///
    /// + request `group_request` The server end of a
    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
    pub fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        BufferCollectionTokenProxyInterface::r#create_buffer_collection_token_group(self, payload)
    }
7267}
7268
// Generated client-side transport implementation of the
// `BufferCollectionToken` protocol. Each method encodes its message with the
// method's 64-bit ordinal (the hex literal) and FLEXIBLE dynamic flags.
// One-way methods use `Client::send`; two-way methods use
// `send_query_and_decode` with a local `_decode` helper that decodes the
// response body against the same ordinal and unwraps the flexible envelope.
impl BufferCollectionTokenProxyInterface for BufferCollectionTokenProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the empty `Sync` response (ordinal 0x11ac2555cf575b54).
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way: no response is decoded for `Release`.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            // `FlexibleResultType`: this two-way method returns a
            // success/error union rather than a plain response table.
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type DuplicateSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenDuplicateSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#duplicate_sync(
        &self,
        mut payload: &BufferCollectionTokenDuplicateSyncRequest,
    ) -> Self::DuplicateSyncResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenDuplicateSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenDuplicateSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1c1af9919d1ca45c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenMarker>("duplicate_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenDuplicateSyncRequest,
            BufferCollectionTokenDuplicateSyncResponse,
        >(
            payload,
            0x1c1af9919d1ca45c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#duplicate(
        &self,
        mut payload: BufferCollectionTokenDuplicateRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenDuplicateRequest>(
            &mut payload,
            0x73e78f92ee7fb887,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_dispensable(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x228acf979254df8b,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#create_buffer_collection_token_group(
        &self,
        mut payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
            &mut payload,
            0x30f8d48e77bd36f2,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
7503
/// Stream of events arriving on a `BufferCollectionToken` channel; items are
/// decoded into [`BufferCollectionTokenEvent`] by the `futures::Stream` impl.
pub struct BufferCollectionTokenEventStream {
    // Raw event receiver for the proxy's channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
7507
// The stream holds no self-referential data, so it may be moved after polling.
impl std::marker::Unpin for BufferCollectionTokenEventStream {}
7509
impl futures::stream::FusedStream for BufferCollectionTokenEventStream {
    // Termination is delegated to the underlying event receiver.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
7515
7516impl futures::Stream for BufferCollectionTokenEventStream {
7517    type Item = Result<BufferCollectionTokenEvent, fidl::Error>;
7518
7519    fn poll_next(
7520        mut self: std::pin::Pin<&mut Self>,
7521        cx: &mut std::task::Context<'_>,
7522    ) -> std::task::Poll<Option<Self::Item>> {
7523        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
7524            &mut self.event_receiver,
7525            cx
7526        )?) {
7527            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenEvent::decode(buf))),
7528            None => std::task::Poll::Ready(None),
7529        }
7530    }
7531}
7532
/// An event received on the `BufferCollectionToken` protocol.
///
/// These bindings define no concrete events for this protocol; the only
/// variant is the flexible-interaction catch-all for unrecognized events.
#[derive(Debug)]
pub enum BufferCollectionTokenEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
7541
7542impl BufferCollectionTokenEvent {
7543    /// Decodes a message buffer as a [`BufferCollectionTokenEvent`].
7544    fn decode(
7545        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
7546    ) -> Result<BufferCollectionTokenEvent, fidl::Error> {
7547        let (bytes, _handles) = buf.split_mut();
7548        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7549        debug_assert_eq!(tx_header.tx_id, 0);
7550        match tx_header.ordinal {
7551            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7552                Ok(BufferCollectionTokenEvent::_UnknownEvent { ordinal: tx_header.ordinal })
7553            }
7554            _ => Err(fidl::Error::UnknownOrdinal {
7555                ordinal: tx_header.ordinal,
7556                protocol_name:
7557                    <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7558            }),
7559        }
7560    }
7561}
7562
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionToken.
pub struct BufferCollectionTokenRequestStream {
    // Shared server state: the channel plus shutdown bookkeeping.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream yields `None`; polling after that panics.
    is_terminated: bool,
}
7568
// The stream holds no self-referential data, so it may be moved after polling.
impl std::marker::Unpin for BufferCollectionTokenRequestStream {}
7570
impl futures::stream::FusedStream for BufferCollectionTokenRequestStream {
    // Reports the locally tracked termination flag set by `poll_next`.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
7576
7577impl fidl::endpoints::RequestStream for BufferCollectionTokenRequestStream {
7578    type Protocol = BufferCollectionTokenMarker;
7579    type ControlHandle = BufferCollectionTokenControlHandle;
7580
7581    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
7582        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
7583    }
7584
7585    fn control_handle(&self) -> Self::ControlHandle {
7586        BufferCollectionTokenControlHandle { inner: self.inner.clone() }
7587    }
7588
7589    fn into_inner(
7590        self,
7591    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
7592    {
7593        (self.inner, self.is_terminated)
7594    }
7595
7596    fn from_inner(
7597        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
7598        is_terminated: bool,
7599    ) -> Self {
7600        Self { inner, is_terminated }
7601    }
7602}
7603
7604impl futures::Stream for BufferCollectionTokenRequestStream {
7605    type Item = Result<BufferCollectionTokenRequest, fidl::Error>;
7606
7607    fn poll_next(
7608        mut self: std::pin::Pin<&mut Self>,
7609        cx: &mut std::task::Context<'_>,
7610    ) -> std::task::Poll<Option<Self::Item>> {
7611        let this = &mut *self;
7612        if this.inner.check_shutdown(cx) {
7613            this.is_terminated = true;
7614            return std::task::Poll::Ready(None);
7615        }
7616        if this.is_terminated {
7617            panic!("polled BufferCollectionTokenRequestStream after completion");
7618        }
7619        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
7620            |bytes, handles| {
7621                match this.inner.channel().read_etc(cx, bytes, handles) {
7622                    std::task::Poll::Ready(Ok(())) => {}
7623                    std::task::Poll::Pending => return std::task::Poll::Pending,
7624                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
7625                        this.is_terminated = true;
7626                        return std::task::Poll::Ready(None);
7627                    }
7628                    std::task::Poll::Ready(Err(e)) => {
7629                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
7630                            e.into(),
7631                        ))))
7632                    }
7633                }
7634
7635                // A message has been received from the channel
7636                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
7637
7638                std::task::Poll::Ready(Some(match header.ordinal {
7639                0x11ac2555cf575b54 => {
7640                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7641                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7642                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7643                    let control_handle = BufferCollectionTokenControlHandle {
7644                        inner: this.inner.clone(),
7645                    };
7646                    Ok(BufferCollectionTokenRequest::Sync {
7647                        responder: BufferCollectionTokenSyncResponder {
7648                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7649                            tx_id: header.tx_id,
7650                        },
7651                    })
7652                }
7653                0x6a5cae7d6d6e04c6 => {
7654                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7655                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7656                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7657                    let control_handle = BufferCollectionTokenControlHandle {
7658                        inner: this.inner.clone(),
7659                    };
7660                    Ok(BufferCollectionTokenRequest::Release {
7661                        control_handle,
7662                    })
7663                }
7664                0xb41f1624f48c1e9 => {
7665                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7666                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7667                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
7668                    let control_handle = BufferCollectionTokenControlHandle {
7669                        inner: this.inner.clone(),
7670                    };
7671                    Ok(BufferCollectionTokenRequest::SetName {payload: req,
7672                        control_handle,
7673                    })
7674                }
7675                0x5cde8914608d99b1 => {
7676                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7677                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7678                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
7679                    let control_handle = BufferCollectionTokenControlHandle {
7680                        inner: this.inner.clone(),
7681                    };
7682                    Ok(BufferCollectionTokenRequest::SetDebugClientInfo {payload: req,
7683                        control_handle,
7684                    })
7685                }
7686                0x716b0af13d5c0806 => {
7687                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7688                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7689                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
7690                    let control_handle = BufferCollectionTokenControlHandle {
7691                        inner: this.inner.clone(),
7692                    };
7693                    Ok(BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {payload: req,
7694                        control_handle,
7695                    })
7696                }
7697                0x5209c77415b4dfad => {
7698                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7699                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7700                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7701                    let control_handle = BufferCollectionTokenControlHandle {
7702                        inner: this.inner.clone(),
7703                    };
7704                    Ok(BufferCollectionTokenRequest::SetVerboseLogging {
7705                        control_handle,
7706                    })
7707                }
7708                0x5b3d0e51614df053 => {
7709                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7710                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7711                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7712                    let control_handle = BufferCollectionTokenControlHandle {
7713                        inner: this.inner.clone(),
7714                    };
7715                    Ok(BufferCollectionTokenRequest::GetNodeRef {
7716                        responder: BufferCollectionTokenGetNodeRefResponder {
7717                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7718                            tx_id: header.tx_id,
7719                        },
7720                    })
7721                }
7722                0x3a58e00157e0825 => {
7723                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7724                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7725                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
7726                    let control_handle = BufferCollectionTokenControlHandle {
7727                        inner: this.inner.clone(),
7728                    };
7729                    Ok(BufferCollectionTokenRequest::IsAlternateFor {payload: req,
7730                        responder: BufferCollectionTokenIsAlternateForResponder {
7731                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7732                            tx_id: header.tx_id,
7733                        },
7734                    })
7735                }
7736                0x77d19a494b78ba8c => {
7737                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7738                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7739                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7740                    let control_handle = BufferCollectionTokenControlHandle {
7741                        inner: this.inner.clone(),
7742                    };
7743                    Ok(BufferCollectionTokenRequest::GetBufferCollectionId {
7744                        responder: BufferCollectionTokenGetBufferCollectionIdResponder {
7745                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7746                            tx_id: header.tx_id,
7747                        },
7748                    })
7749                }
7750                0x22dd3ea514eeffe1 => {
7751                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7752                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7753                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7754                    let control_handle = BufferCollectionTokenControlHandle {
7755                        inner: this.inner.clone(),
7756                    };
7757                    Ok(BufferCollectionTokenRequest::SetWeak {
7758                        control_handle,
7759                    })
7760                }
7761                0x38a44fc4d7724be9 => {
7762                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7763                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7764                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
7765                    let control_handle = BufferCollectionTokenControlHandle {
7766                        inner: this.inner.clone(),
7767                    };
7768                    Ok(BufferCollectionTokenRequest::SetWeakOk {payload: req,
7769                        control_handle,
7770                    })
7771                }
7772                0x3f22f2a293d3cdac => {
7773                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7774                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7775                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
7776                    let control_handle = BufferCollectionTokenControlHandle {
7777                        inner: this.inner.clone(),
7778                    };
7779                    Ok(BufferCollectionTokenRequest::AttachNodeTracking {payload: req,
7780                        control_handle,
7781                    })
7782                }
7783                0x1c1af9919d1ca45c => {
7784                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
7785                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7786                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateSyncRequest>(&header, _body_bytes, handles, &mut req)?;
7787                    let control_handle = BufferCollectionTokenControlHandle {
7788                        inner: this.inner.clone(),
7789                    };
7790                    Ok(BufferCollectionTokenRequest::DuplicateSync {payload: req,
7791                        responder: BufferCollectionTokenDuplicateSyncResponder {
7792                            control_handle: std::mem::ManuallyDrop::new(control_handle),
7793                            tx_id: header.tx_id,
7794                        },
7795                    })
7796                }
7797                0x73e78f92ee7fb887 => {
7798                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7799                    let mut req = fidl::new_empty!(BufferCollectionTokenDuplicateRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7800                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenDuplicateRequest>(&header, _body_bytes, handles, &mut req)?;
7801                    let control_handle = BufferCollectionTokenControlHandle {
7802                        inner: this.inner.clone(),
7803                    };
7804                    Ok(BufferCollectionTokenRequest::Duplicate {payload: req,
7805                        control_handle,
7806                    })
7807                }
7808                0x228acf979254df8b => {
7809                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7810                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
7811                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
7812                    let control_handle = BufferCollectionTokenControlHandle {
7813                        inner: this.inner.clone(),
7814                    };
7815                    Ok(BufferCollectionTokenRequest::SetDispensable {
7816                        control_handle,
7817                    })
7818                }
7819                0x30f8d48e77bd36f2 => {
7820                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
7821                    let mut req = fidl::new_empty!(BufferCollectionTokenCreateBufferCollectionTokenGroupRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
7822                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(&header, _body_bytes, handles, &mut req)?;
7823                    let control_handle = BufferCollectionTokenControlHandle {
7824                        inner: this.inner.clone(),
7825                    };
7826                    Ok(BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {payload: req,
7827                        control_handle,
7828                    })
7829                }
7830                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7831                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7832                        ordinal: header.ordinal,
7833                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7834                        method_type: fidl::MethodType::OneWay,
7835                    })
7836                }
7837                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
7838                    this.inner.send_framework_err(
7839                        fidl::encoding::FrameworkErr::UnknownMethod,
7840                        header.tx_id,
7841                        header.ordinal,
7842                        header.dynamic_flags(),
7843                        (bytes, handles),
7844                    )?;
7845                    Ok(BufferCollectionTokenRequest::_UnknownMethod {
7846                        ordinal: header.ordinal,
7847                        control_handle: BufferCollectionTokenControlHandle { inner: this.inner.clone() },
7848                        method_type: fidl::MethodType::TwoWay,
7849                    })
7850                }
7851                _ => Err(fidl::Error::UnknownOrdinal {
7852                    ordinal: header.ordinal,
7853                    protocol_name: <BufferCollectionTokenMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
7854                }),
7855            }))
7856            },
7857        )
7858    }
7859}
7860
7861/// A [`fuchsia.sysmem2/BufferCollectionToken`] is not a buffer collection, but
7862/// rather is a way to identify a specific potential shared buffer collection,
7863/// and a way to distribute that potential shared buffer collection to
7864/// additional participants prior to the buffer collection allocating any
7865/// buffers.
7866///
7867/// Epitaphs are not used in this protocol.
7868///
7869/// We use a channel for the `BufferCollectionToken` instead of a single
7870/// `eventpair` (pair) because this way we can detect error conditions like a
7871/// participant failing mid-create.
7872#[derive(Debug)]
7873pub enum BufferCollectionTokenRequest {
7874    /// Ensure that previous messages have been received server side. This is
7875    /// particularly useful after previous messages that created new tokens,
7876    /// because a token must be known to the sysmem server before sending the
7877    /// token to another participant.
7878    ///
7879    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
7880    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
7881    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
7882    /// to mitigate the possibility of a hostile/fake
7883    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
7884    /// Another way is to pass the token to
7885    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
7886    /// the token as part of exchanging it for a
7887    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
7888    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
7889    /// of stalling.
7890    ///
7891    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
7892    /// and then starting and completing a `Sync`, it's then safe to send the
7893    /// `BufferCollectionToken` client ends to other participants knowing the
7894    /// server will recognize the tokens when they're sent by the other
7895    /// participants to sysmem in a
7896    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
7897    /// efficient way to create tokens while avoiding unnecessary round trips.
7898    ///
7899    /// Other options include waiting for each
7900    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
7901    /// individually (using separate call to `Sync` after each), or calling
7902    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
7903    /// converted to a `BufferCollection` via
7904    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
7905    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
7906    /// the sync step and can create multiple tokens at once.
7907    Sync { responder: BufferCollectionTokenSyncResponder },
7908    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
7909    ///
7910    /// Normally a participant will convert a `BufferCollectionToken` into a
7911    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
7912    /// `Release` via the token (and then close the channel immediately or
7913    /// shortly later in response to server closing the server end), which
7914    /// avoids causing buffer collection failure. Without a prior `Release`,
7915    /// closing the `BufferCollectionToken` client end will cause buffer
7916    /// collection failure.
7917    ///
7918    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
7919    ///
7920    /// By default the server handles unexpected closure of a
7921    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
7922    /// first) by failing the buffer collection. Partly this is to expedite
7923    /// closing VMO handles to reclaim memory when any participant fails. If a
7924    /// participant would like to cleanly close a `BufferCollection` without
7925    /// causing buffer collection failure, the participant can send `Release`
7926    /// before closing the `BufferCollection` client end. The `Release` can
7927    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
7928    /// buffer collection won't require constraints from this node in order to
7929    /// allocate. If after `SetConstraints`, the constraints are retained and
7930    /// aggregated, despite the lack of `BufferCollection` connection at the
7931    /// time of constraints aggregation.
7932    ///
7933    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
7934    ///
7935    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
7936    /// end (without `Release` first) will trigger failure of the buffer
7937    /// collection. To close a `BufferCollectionTokenGroup` channel without
7938    /// failing the buffer collection, ensure that AllChildrenPresent() has been
7939    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
7940    /// client end.
7941    ///
7942    /// If `Release` occurs before
7943    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
7944    /// buffer collection will fail (triggered by reception of `Release` without
7945    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
7946    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
7947    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
7948    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
7949    /// close requires `AllChildrenPresent` (if not already sent), then
7950    /// `Release`, then close client end.
7951    ///
7952    /// If `Release` occurs after `AllChildrenPresent`, the children and all
7953    /// their constraints remain intact (just as they would if the
7954    /// `BufferCollectionTokenGroup` channel had remained open), and the client
7955    /// end close doesn't trigger buffer collection failure.
7956    ///
7957    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
7958    ///
7959    /// For brevity, the per-channel-protocol paragraphs above ignore the
7960    /// separate failure domain created by
7961    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
7962    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
7963    /// unexpectedly closes (without `Release` first) and that client end is
7964    /// under a failure domain, instead of failing the whole buffer collection,
7965    /// the failure domain is failed, but the buffer collection itself is
7966    /// isolated from failure of the failure domain. Such failure domains can be
7967    /// nested, in which case only the inner-most failure domain in which the
7968    /// `Node` resides fails.
7969    Release { control_handle: BufferCollectionTokenControlHandle },
7970    /// Set a name for VMOs in this buffer collection.
7971    ///
7972    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
7973    /// will be truncated to fit. The name of the vmo will be suffixed with the
7974    /// buffer index within the collection (if the suffix fits within
7975    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
7976    /// listed in the inspect data.
7977    ///
7978    /// The name only affects VMOs allocated after the name is set; this call
7979    /// does not rename existing VMOs. If multiple clients set different names
7980    /// then the larger priority value will win. Setting a new name with the
7981    /// same priority as a prior name doesn't change the name.
7982    ///
7983    /// All table fields are currently required.
7984    ///
7985    /// + request `priority` The name is only set if this is the first `SetName`
7986    ///   or if `priority` is greater than any previous `priority` value in
7987    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
7988    /// + request `name` The name for VMOs created under this buffer collection.
7989    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenControlHandle },
7990    /// Set information about the current client that can be used by sysmem to
7991    /// help diagnose leaking memory and allocation stalls waiting for a
7992    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
7993    ///
7994    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
7995    /// `Node`(s) derived from this `Node`, unless overridden by
7996    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
7997    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
7998    ///
7999    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
8000    /// `Allocator` is the most efficient way to ensure that all
8001    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
8002    /// set, and is also more efficient than separately sending the same debug
8003    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
8004    /// created [`fuchsia.sysmem2/Node`].
8005    ///
8006    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
8007    /// indicate which client is closing their channel first, leading to subtree
8008    /// failure (which can be normal if the purpose of the subtree is over, but
8009    /// if happening earlier than expected, the client-channel-specific name can
8010    /// help diagnose where the failure is first coming from, from sysmem's
8011    /// point of view).
8012    ///
8013    /// All table fields are currently required.
8014    ///
8015    /// + request `name` This can be an arbitrary string, but the current
8016    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
8017    /// + request `id` This can be an arbitrary id, but the current process ID
8018    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
8019    SetDebugClientInfo {
8020        payload: NodeSetDebugClientInfoRequest,
8021        control_handle: BufferCollectionTokenControlHandle,
8022    },
8023    /// Sysmem logs a warning if sysmem hasn't seen
8024    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
8025    /// within 5 seconds after creation of a new collection.
8026    ///
8027    /// Clients can call this method to change when the log is printed. If
8028    /// multiple client set the deadline, it's unspecified which deadline will
8029    /// take effect.
8030    ///
8031    /// In most cases the default works well.
8032    ///
8033    /// All table fields are currently required.
8034    ///
8035    /// + request `deadline` The time at which sysmem will start trying to log
8036    ///   the warning, unless all constraints are with sysmem by then.
8037    SetDebugTimeoutLogDeadline {
8038        payload: NodeSetDebugTimeoutLogDeadlineRequest,
8039        control_handle: BufferCollectionTokenControlHandle,
8040    },
8041    /// This enables verbose logging for the buffer collection.
8042    ///
8043    /// Verbose logging includes constraints set via
8044    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
8045    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
8046    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
8047    /// the tree of `Node`(s).
8048    ///
8049    /// Normally sysmem prints only a single line complaint when aggregation
8050    /// fails, with just the specific detailed reason that aggregation failed,
8051    /// with little surrounding context.  While this is often enough to diagnose
8052    /// a problem if only a small change was made and everything was working
8053    /// before the small change, it's often not particularly helpful for getting
8054    /// a new buffer collection to work for the first time.  Especially with
8055    /// more complex trees of nodes, involving things like
8056    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
8057    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
8058    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
8059    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
8060    /// looks like and why it's failing a logical allocation, or why a tree or
8061    /// subtree is failing sooner than expected.
8062    ///
8063    /// The intent of the extra logging is to be acceptable from a performance
8064    /// point of view, under the assumption that verbose logging is only enabled
8065    /// on a low number of buffer collections. If we're not tracking down a bug,
8066    /// we shouldn't send this message.
8067    SetVerboseLogging { control_handle: BufferCollectionTokenControlHandle },
8068    /// This gets a handle that can be used as a parameter to
8069    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
8070    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
8071    /// client obtained this handle from this `Node`.
8072    ///
8073    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
8074    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
8075    /// despite the two calls typically being on different channels.
8076    ///
8077    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
8078    ///
8079    /// All table fields are currently required.
8080    ///
8081    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
8082    ///   different `Node` channel, to prove that the client obtained the handle
8083    ///   from this `Node`.
8084    GetNodeRef { responder: BufferCollectionTokenGetNodeRefResponder },
8085    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
8086    /// rooted at a different child token of a common parent
8087    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
8088    /// passed-in `node_ref`.
8089    ///
8090    /// This call is for assisting with admission control de-duplication, and
8091    /// with debugging.
8092    ///
8093    /// The `node_ref` must be obtained using
8094    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
8095    ///
8096    /// The `node_ref` can be a duplicated handle; it's not necessary to call
8097    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
8098    ///
8099    /// If a calling token may not actually be a valid token at all due to a
8100    /// potentially hostile/untrusted provider of the token, call
8101    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8102    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
8103    /// never responds due to a calling token not being a real token (not really
8104    /// talking to sysmem).  Another option is to call
8105    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
8106    /// which also validates the token along with converting it to a
8107    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
8108    ///
8109    /// All table fields are currently required.
8110    ///
8111    /// - response `is_alternate`
8112    ///   - true: The first parent node in common between the calling node and
8113    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
8114    ///     that the calling `Node` and the `node_ref` `Node` will not have both
8115    ///     their constraints apply - rather sysmem will choose one or the other
8116    ///     of the constraints - never both.  This is because only one child of
8117    ///     a `BufferCollectionTokenGroup` is selected during logical
8118    ///     allocation, with only that one child's subtree contributing to
8119    ///     constraints aggregation.
8120    ///   - false: The first parent node in common between the calling `Node`
8121    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
8122    ///     Currently, this means the first parent node in common is a
8123    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
8124    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
8125    ///     `Node` may have both their constraints apply during constraints
8126    ///     aggregation of the logical allocation, if both `Node`(s) are
8127    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
8128    ///     this case, there is no `BufferCollectionTokenGroup` that will
8129    ///     directly prevent the two `Node`(s) from both being selected and
8130    ///     their constraints both aggregated, but even when false, one or both
8131    ///     `Node`(s) may still be eliminated from consideration if one or both
8132    ///     `Node`(s) has a direct or indirect parent
8133    ///     `BufferCollectionTokenGroup` which selects a child subtree other
8134    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
8135    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
8136    ///   associated with the same buffer collection as the calling `Node`.
8137    ///   Another reason for this error is if the `node_ref` is an
8138    ///   [`zx.Handle:EVENT`] handle with sufficient rights, but isn't actually
8139    ///   a real `node_ref` obtained from `GetNodeRef`.
8140    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
8141    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
8142    ///   the needed rights expected on a real `node_ref`.
8143    /// * No other failing status codes are returned by this call.  However,
8144    ///   sysmem may add additional codes in future, so the client should have
8145    ///   sensible default handling for any failing status code.
8146    IsAlternateFor {
8147        payload: NodeIsAlternateForRequest,
8148        responder: BufferCollectionTokenIsAlternateForResponder,
8149    },
8150    /// Get the buffer collection ID. This ID is also available from
8151    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
8152    /// within the collection).
8153    ///
8154    /// This call is mainly useful in situations where we can't convey a
8155    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
8156    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
8157    /// handle, which can be joined back up with a `BufferCollection` client end
8158    /// that was created via a different path. Prefer to convey a
8159    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
8160    ///
8161    /// Trusting a `buffer_collection_id` value from a source other than sysmem
8162    /// is analogous to trusting a koid value from a source other than zircon.
8163    /// Both should be avoided unless really necessary, and both require
8164    /// caution. In some situations it may be reasonable to refer to a
8165    /// pre-established `BufferCollection` by `buffer_collection_id` via a
8166    /// protocol for efficiency reasons, but an incoming value purporting to be
8167    /// a `buffer_collection_id` is not sufficient alone to justify granting the
8168    /// sender of the `buffer_collection_id` any capability. The sender must
8169    /// first prove to a receiver that the sender has/had a VMO or has/had a
8170    /// `BufferCollectionToken` to the same collection by sending a handle that
8171    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
8172    /// `buffer_collection_id` value. The receiver should take care to avoid
8173    /// assuming that a sender had a `BufferCollectionToken` in cases where the
8174    /// sender has only proven that the sender had a VMO.
8175    ///
8176    /// - response `buffer_collection_id` This ID is unique per buffer
8177    ///   collection per boot. Each buffer is uniquely identified by the
8178    ///   `buffer_collection_id` and `buffer_index` together.
8179    GetBufferCollectionId { responder: BufferCollectionTokenGetBufferCollectionIdResponder },
8180    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
8181    /// created after this message to weak, which means that a client's `Node`
8182    /// client end (or a child created after this message) is not alone
8183    /// sufficient to keep allocated VMOs alive.
8184    ///
8185    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
8186    /// `close_weak_asap`.
8187    ///
8188    /// This message is only permitted before the `Node` becomes ready for
8189    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
8190    ///   * `BufferCollectionToken`: any time
8191    ///   * `BufferCollection`: before `SetConstraints`
8192    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
8193    ///
8194    /// Currently, no conversion from strong `Node` to weak `Node` after ready
8195    /// for allocation is provided, but a client can simulate that by creating
8196    /// an additional `Node` before allocation and setting that additional
8197    /// `Node` to weak, and then potentially at some point later sending
8198    /// `Release` and closing the client end of the client's strong `Node`, but
8199    /// keeping the client's weak `Node`.
8200    ///
8201    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
8202    /// collection failure (all `Node` client end(s) will see
8203    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
8204    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
8205    /// this situation until all `Node`(s) are ready for allocation. For initial
8206    /// allocation to succeed, at least one strong `Node` is required to exist
8207    /// at allocation time, but after that client receives VMO handles, that
8208    /// client can `BufferCollection.Release` and close the client end without
8209    /// causing this type of failure.
8210    ///
8211    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
8212    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
8213    /// separately as appropriate.
8214    SetWeak { control_handle: BufferCollectionTokenControlHandle },
8215    /// This indicates to sysmem that the client is prepared to pay attention to
8216    /// `close_weak_asap`.
8217    ///
8218    /// If sent, this message must be before
8219    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
8220    ///
8221    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
8222    /// send this message before `WaitForAllBuffersAllocated`, or a parent
8223    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
8224    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
8225    /// trigger buffer collection failure.
8226    ///
8227    /// This message is necessary because weak sysmem VMOs have not always been
8228    /// a thing, so older clients are not aware of the need to pay attention to
8229    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
8230    /// sysmem weak VMO handles asap. By having this message and requiring
8231    /// participants to indicate their acceptance of this aspect of the overall
8232    /// protocol, we avoid situations where an older client is delivered a weak
8233    /// VMO without any way for sysmem to get that VMO to close quickly later
8234    /// (and on a per-buffer basis).
8235    ///
8236    /// A participant that doesn't handle `close_weak_asap` and also doesn't
8237    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
8238    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
8239    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
8240    /// same participant has a child/delegate which does retrieve VMOs, that
8241    /// child/delegate will need to send `SetWeakOk` before
8242    /// `WaitForAllBuffersAllocated`.
8243    ///
8244    /// + request `for_child_nodes_also` If present and true, this means direct
8245    ///   child nodes of this node created after this message plus all
8246    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
8247    ///   those nodes. Any child node of this node that was created before this
8248    ///   message is not included. This setting is "sticky" in the sense that a
8249    ///   subsequent `SetWeakOk` without this bool set to true does not reset
8250    ///   the server-side bool. If this creates a problem for a participant, a
8251    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
8252    ///   tokens instead, as appropriate. A participant should only set
8253    ///   `for_child_nodes_also` true if the participant can really promise to
8254    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
8255    ///   weak VMO handles held by participants holding the corresponding child
8256    ///   `Node`(s). When `for_child_nodes_also` is set, descendant `Node`(s)
8257    ///   which are using sysmem(1) can be weak, despite the clients of those
8258    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
8259    ///   direct way to find out about `close_weak_asap`. This only applies to
8260    ///   descendants of this `Node` which are using sysmem(1), not to this
8261    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
8262    ///   token, which will fail allocation unless an ancestor of this `Node`
8263    ///   specified `for_child_nodes_also` true.
8264    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: BufferCollectionTokenControlHandle },
8265    /// The server_end will be closed after this `Node` and any child nodes
8266    /// have released their buffer counts, making those counts available for
8267    /// reservation by a different `Node` via
8268    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
8269    ///
8270    /// The `Node` buffer counts may not be released until the entire tree of
8271    /// `Node`(s) is closed or failed, because
8272    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
8273    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
8274    /// `Node` buffer counts remain reserved until the orphaned node is later
8275    /// cleaned up.
8276    ///
8277    /// If the `Node` exceeds a fairly large number of attached eventpair server
8278    /// ends, a log message will indicate this and the `Node` (and the
8279    /// appropriate) sub-tree will fail.
8280    ///
8281    /// The `server_end` will remain open when
8282    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
8283    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
8284    /// [`fuchsia.sysmem2/BufferCollection`].
8285    ///
8286    /// This message can also be used with a
8287    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8288    AttachNodeTracking {
8289        payload: NodeAttachNodeTrackingRequest,
8290        control_handle: BufferCollectionTokenControlHandle,
8291    },
8292    /// Create additional [`fuchsia.sysmem2/BufferCollectionToken`](s) from this
8293    /// one, referring to the same buffer collection.
8294    ///
8295    /// The created tokens are children of this token in the
8296    /// [`fuchsia.sysmem2/Node`] hierarchy.
8297    ///
8298    /// This method can be used to add more participants, by transferring the
8299    /// newly created tokens to additional participants.
8300    ///
8301    /// A new token will be returned for each entry in the
8302    /// `rights_attenuation_masks` array.
8303    ///
8304    /// If the called token may not actually be a valid token due to a
8305    /// potentially hostile/untrusted provider of the token, consider using
8306    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
8307    /// instead of potentially getting stuck indefinitely if
8308    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] never responds
8309    /// due to the calling token not being a real token.
8310    ///
8311    /// In contrast to [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`], no
8312    /// separate [`fuchsia.sysmem2/Node.Sync`] is needed after calling this
8313    /// method, because the sync step is included in this call, at the cost of a
8314    /// round trip during this call.
8315    ///
8316    /// All tokens must be turned in to sysmem via
8317    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8318    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8319    /// successfully allocate buffers (or to logically allocate buffers in the
8320    /// case of subtrees involving
8321    /// [`fuchsia.sysmem2/BufferCollectionToken.AttachToken`]).
8322    ///
8323    /// All table fields are currently required.
8324    ///
8325    /// + request `rights_attenuation_mask` In each entry of
8326    ///   `rights_attenuation_masks`, rights bits that are zero will be absent
8327    ///   in the buffer VMO rights obtainable via the corresponding returned
8328    ///   token. This allows an initiator or intermediary participant to
8329    ///   attenuate the rights available to a participant. This does not allow a
8330    ///   participant to gain rights that the participant doesn't already have.
8331    ///   The value `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no
8332    ///   attenuation should be applied.
8333    /// - response `tokens` The client ends of each newly created token.
8334    DuplicateSync {
8335        payload: BufferCollectionTokenDuplicateSyncRequest,
8336        responder: BufferCollectionTokenDuplicateSyncResponder,
8337    },
8338    /// Create an additional [`fuchsia.sysmem2/BufferCollectionToken`] from this
8339    /// one, referring to the same buffer collection.
8340    ///
8341    /// The created token is a child of this token in the
    /// [`fuchsia.sysmem2/Node`] hierarchy.
8343    ///
8344    /// This method can be used to add a participant, by transferring the newly
8345    /// created token to another participant.
8346    ///
8347    /// This one-way message can be used instead of the two-way
8348    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] FIDL call in
    /// performance sensitive cases where it would be undesirable to wait for
8350    /// sysmem to respond to
8351    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] or when the
8352    /// client code isn't structured to make it easy to duplicate all the needed
8353    /// tokens at once.
8354    ///
8355    /// After sending one or more `Duplicate` messages, and before sending the
8356    /// newly created child tokens to other participants (or to other
8357    /// [`fuchsia.sysmem2/Allocator`] channels), the client must send a
8358    /// [`fuchsia.sysmem2/Node.Sync`] and wait for the `Sync` response. The
8359    /// `Sync` call can be made on the token, or on the `BufferCollection`
8360    /// obtained by passing this token to `BindSharedCollection`.  Either will
8361    /// ensure that the server knows about the tokens created via `Duplicate`
8362    /// before the other participant sends the token to the server via separate
8363    /// `Allocator` channel.
8364    ///
8365    /// All tokens must be turned in via
8366    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] or
8367    /// [`fuchsia.sysmem2/Node.Release`] for a `BufferCollection` to
8368    /// successfully allocate buffers.
8369    ///
8370    /// All table fields are currently required.
8371    ///
8372    /// + request `rights_attenuation_mask` The rights bits that are zero in
8373    ///   this mask will be absent in the buffer VMO rights obtainable via the
8374    ///   client end of `token_request`. This allows an initiator or
8375    ///   intermediary participant to attenuate the rights available to a
8376    ///   delegate participant. This does not allow a participant to gain rights
8377    ///   that the participant doesn't already have. The value
8378    ///   `ZX_RIGHT_SAME_RIGHTS` can be used to specify that no attenuation
8379    ///   should be applied.
8380    ///   + These values for rights_attenuation_mask result in no attenuation:
8381    ///     + `ZX_RIGHT_SAME_RIGHTS` (preferred)
8382    ///     + 0xFFFFFFFF (this is reasonable when an attenuation mask is
8383    ///       computed)
8384    ///     + 0 (deprecated - do not use 0 - an ERROR will go to the log)
8385    /// + request `token_request` is the server end of a `BufferCollectionToken`
8386    ///   channel. The client end of this channel acts as another participant in
8387    ///   the shared buffer collection.
8388    Duplicate {
8389        payload: BufferCollectionTokenDuplicateRequest,
8390        control_handle: BufferCollectionTokenControlHandle,
8391    },
8392    /// Set this [`fuchsia.sysmem2/BufferCollectionToken`] to dispensable.
8393    ///
8394    /// When the `BufferCollectionToken` is converted to a
8395    /// [`fuchsia.sysmem2/BufferCollection`], the dispensable status applies to
8396    /// the `BufferCollection` also.
8397    ///
8398    /// Normally, if a client closes a [`fuchsia.sysmem2/BufferCollection`]
8399    /// client end without having sent
8400    /// [`fuchsia.sysmem2/BufferCollection.Release`] first, the
    /// `BufferCollection` [`fuchsia.sysmem2/Node`] will fail, which also
8402    /// propagates failure to the parent [`fuchsia.sysmem2/Node`] and so on up
8403    /// to the root `Node`, which fails the whole buffer collection. In
8404    /// contrast, a dispensable `Node` can fail after buffers are allocated
8405    /// without causing failure of its parent in the [`fuchsia.sysmem2/Node`]
    /// hierarchy.
8407    ///
8408    /// The dispensable `Node` participates in constraints aggregation along
8409    /// with its parent before buffer allocation. If the dispensable `Node`
8410    /// fails before buffers are allocated, the failure propagates to the
8411    /// dispensable `Node`'s parent.
8412    ///
8413    /// After buffers are allocated, failure of the dispensable `Node` (or any
8414    /// child of the dispensable `Node`) does not propagate to the dispensable
8415    /// `Node`'s parent. Failure does propagate from a normal child of a
8416    /// dispensable `Node` to the dispensable `Node`.  Failure of a child is
8417    /// blocked from reaching its parent if the child is attached using
8418    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or if the child is
8419    /// dispensable and the failure occurred after allocation.
8420    ///
8421    /// A dispensable `Node` can be used in cases where a participant needs to
8422    /// provide constraints, but after buffers are allocated, the participant
8423    /// can fail without causing buffer collection failure from the parent
8424    /// `Node`'s point of view.
8425    ///
8426    /// In contrast, `BufferCollection.AttachToken` can be used to create a
8427    /// `BufferCollectionToken` which does not participate in constraints
8428    /// aggregation with its parent `Node`, and whose failure at any time does
8429    /// not propagate to its parent `Node`, and whose potential delay providing
8430    /// constraints does not prevent the parent `Node` from completing its
8431    /// buffer allocation.
8432    ///
8433    /// An initiator (creator of the root `Node` using
8434    /// [`fuchsia.sysmem2/Allocator.AllocateSharedCollection`]) may in some
8435    /// scenarios choose to initially use a dispensable `Node` for a first
8436    /// instance of a participant, and then later if the first instance of that
    /// participant fails, a new second instance of that participant may be given
8438    /// a `BufferCollectionToken` created with `AttachToken`.
8439    ///
8440    /// Normally a client will `SetDispensable` on a `BufferCollectionToken`
8441    /// shortly before sending the dispensable `BufferCollectionToken` to a
8442    /// delegate participant. Because `SetDispensable` prevents propagation of
8443    /// child `Node` failure to parent `Node`(s), if the client was relying on
8444    /// noticing child failure via failure of the parent `Node` retained by the
8445    /// client, the client may instead need to notice failure via other means.
8446    /// If other means aren't available/convenient, the client can instead
8447    /// retain the dispensable `Node` and create a child `Node` under that to
8448    /// send to the delegate participant, retaining this `Node` in order to
8449    /// notice failure of the subtree rooted at this `Node` via this `Node`'s
8450    /// ZX_CHANNEL_PEER_CLOSED signal, and take whatever action is appropriate
8451    /// (e.g. starting a new instance of the delegate participant and handing it
8452    /// a `BufferCollectionToken` created using
8453    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`], or propagate failure
8454    /// and clean up in a client-specific way).
8455    ///
8456    /// While it is possible (and potentially useful) to `SetDispensable` on a
8457    /// direct child of a `BufferCollectionTokenGroup` `Node`, it isn't possible
8458    /// to later replace a failed dispensable `Node` that was a direct child of
8459    /// a `BufferCollectionTokenGroup` with a new token using `AttachToken`
8460    /// (since there's no `AttachToken` on a group). Instead, to enable
8461    /// `AttachToken` replacement in this case, create an additional
8462    /// non-dispensable token that's a direct child of the group and make the
8463    /// existing dispensable token a child of the additional token.  This way,
8464    /// the additional token that is a direct child of the group has
8465    /// `BufferCollection.AttachToken` which can be used to replace the failed
8466    /// dispensable token.
8467    ///
8468    /// `SetDispensable` on an already-dispensable token is idempotent.
8469    SetDispensable { control_handle: BufferCollectionTokenControlHandle },
8470    /// Create a logical OR among a set of tokens, called a
8471    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
8472    ///
8473    /// Most sysmem clients and many participants don't need to care about this
8474    /// message or about `BufferCollectionTokenGroup`(s). However, in some cases
8475    /// a participant wants to attempt to include one set of delegate
8476    /// participants, but if constraints don't combine successfully that way,
8477    /// fall back to a different (possibly overlapping) set of delegate
8478    /// participants, and/or fall back to a less demanding strategy (in terms of
    /// how strict the [`fuchsia.sysmem2/BufferCollectionConstraints`] are,
8480    /// across all involved delegate participants). In such cases, a
8481    /// `BufferCollectionTokenGroup` is useful.
8482    ///
8483    /// A `BufferCollectionTokenGroup` is used to create a 1 of N OR among N
8484    /// child [`fuchsia.sysmem2/BufferCollectionToken`](s).  The child tokens
8485    /// which are not selected during aggregation will fail (close), which a
8486    /// potential participant should notice when their `BufferCollection`
8487    /// channel client endpoint sees PEER_CLOSED, allowing the participant to
8488    /// clean up the speculative usage that didn't end up happening (this is
    /// similar to a normal `BufferCollection` server end closing on failure to
8490    /// allocate a logical buffer collection or later async failure of a buffer
8491    /// collection).
8492    ///
8493    /// See comments on protocol `BufferCollectionTokenGroup`.
8494    ///
8495    /// Any `rights_attenuation_mask` or `AttachToken`/`SetDispensable` to be
8496    /// applied to the whole group can be achieved with a
8497    /// `BufferCollectionToken` for this purpose as a direct parent of the
8498    /// `BufferCollectionTokenGroup`.
8499    ///
8500    /// All table fields are currently required.
8501    ///
8502    /// + request `group_request` The server end of a
8503    ///   `BufferCollectionTokenGroup` channel to be served by sysmem.
8504    CreateBufferCollectionTokenGroup {
8505        payload: BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8506        control_handle: BufferCollectionTokenControlHandle,
8507    },
8508    /// An interaction was received which does not match any known method.
8509    #[non_exhaustive]
8510    _UnknownMethod {
8511        /// Ordinal of the method that was called.
8512        ordinal: u64,
8513        control_handle: BufferCollectionTokenControlHandle,
8514        method_type: fidl::MethodType,
8515    },
8516}
8517
8518impl BufferCollectionTokenRequest {
8519    #[allow(irrefutable_let_patterns)]
8520    pub fn into_sync(self) -> Option<(BufferCollectionTokenSyncResponder)> {
8521        if let BufferCollectionTokenRequest::Sync { responder } = self {
8522            Some((responder))
8523        } else {
8524            None
8525        }
8526    }
8527
8528    #[allow(irrefutable_let_patterns)]
8529    pub fn into_release(self) -> Option<(BufferCollectionTokenControlHandle)> {
8530        if let BufferCollectionTokenRequest::Release { control_handle } = self {
8531            Some((control_handle))
8532        } else {
8533            None
8534        }
8535    }
8536
8537    #[allow(irrefutable_let_patterns)]
8538    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, BufferCollectionTokenControlHandle)> {
8539        if let BufferCollectionTokenRequest::SetName { payload, control_handle } = self {
8540            Some((payload, control_handle))
8541        } else {
8542            None
8543        }
8544    }
8545
8546    #[allow(irrefutable_let_patterns)]
8547    pub fn into_set_debug_client_info(
8548        self,
8549    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenControlHandle)> {
8550        if let BufferCollectionTokenRequest::SetDebugClientInfo { payload, control_handle } = self {
8551            Some((payload, control_handle))
8552        } else {
8553            None
8554        }
8555    }
8556
8557    #[allow(irrefutable_let_patterns)]
8558    pub fn into_set_debug_timeout_log_deadline(
8559        self,
8560    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenControlHandle)> {
8561        if let BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline {
8562            payload,
8563            control_handle,
8564        } = self
8565        {
8566            Some((payload, control_handle))
8567        } else {
8568            None
8569        }
8570    }
8571
8572    #[allow(irrefutable_let_patterns)]
8573    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenControlHandle)> {
8574        if let BufferCollectionTokenRequest::SetVerboseLogging { control_handle } = self {
8575            Some((control_handle))
8576        } else {
8577            None
8578        }
8579    }
8580
8581    #[allow(irrefutable_let_patterns)]
8582    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGetNodeRefResponder)> {
8583        if let BufferCollectionTokenRequest::GetNodeRef { responder } = self {
8584            Some((responder))
8585        } else {
8586            None
8587        }
8588    }
8589
8590    #[allow(irrefutable_let_patterns)]
8591    pub fn into_is_alternate_for(
8592        self,
8593    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenIsAlternateForResponder)> {
8594        if let BufferCollectionTokenRequest::IsAlternateFor { payload, responder } = self {
8595            Some((payload, responder))
8596        } else {
8597            None
8598        }
8599    }
8600
8601    #[allow(irrefutable_let_patterns)]
8602    pub fn into_get_buffer_collection_id(
8603        self,
8604    ) -> Option<(BufferCollectionTokenGetBufferCollectionIdResponder)> {
8605        if let BufferCollectionTokenRequest::GetBufferCollectionId { responder } = self {
8606            Some((responder))
8607        } else {
8608            None
8609        }
8610    }
8611
8612    #[allow(irrefutable_let_patterns)]
8613    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenControlHandle)> {
8614        if let BufferCollectionTokenRequest::SetWeak { control_handle } = self {
8615            Some((control_handle))
8616        } else {
8617            None
8618        }
8619    }
8620
8621    #[allow(irrefutable_let_patterns)]
8622    pub fn into_set_weak_ok(
8623        self,
8624    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenControlHandle)> {
8625        if let BufferCollectionTokenRequest::SetWeakOk { payload, control_handle } = self {
8626            Some((payload, control_handle))
8627        } else {
8628            None
8629        }
8630    }
8631
8632    #[allow(irrefutable_let_patterns)]
8633    pub fn into_attach_node_tracking(
8634        self,
8635    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenControlHandle)> {
8636        if let BufferCollectionTokenRequest::AttachNodeTracking { payload, control_handle } = self {
8637            Some((payload, control_handle))
8638        } else {
8639            None
8640        }
8641    }
8642
8643    #[allow(irrefutable_let_patterns)]
8644    pub fn into_duplicate_sync(
8645        self,
8646    ) -> Option<(
8647        BufferCollectionTokenDuplicateSyncRequest,
8648        BufferCollectionTokenDuplicateSyncResponder,
8649    )> {
8650        if let BufferCollectionTokenRequest::DuplicateSync { payload, responder } = self {
8651            Some((payload, responder))
8652        } else {
8653            None
8654        }
8655    }
8656
8657    #[allow(irrefutable_let_patterns)]
8658    pub fn into_duplicate(
8659        self,
8660    ) -> Option<(BufferCollectionTokenDuplicateRequest, BufferCollectionTokenControlHandle)> {
8661        if let BufferCollectionTokenRequest::Duplicate { payload, control_handle } = self {
8662            Some((payload, control_handle))
8663        } else {
8664            None
8665        }
8666    }
8667
8668    #[allow(irrefutable_let_patterns)]
8669    pub fn into_set_dispensable(self) -> Option<(BufferCollectionTokenControlHandle)> {
8670        if let BufferCollectionTokenRequest::SetDispensable { control_handle } = self {
8671            Some((control_handle))
8672        } else {
8673            None
8674        }
8675    }
8676
8677    #[allow(irrefutable_let_patterns)]
8678    pub fn into_create_buffer_collection_token_group(
8679        self,
8680    ) -> Option<(
8681        BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
8682        BufferCollectionTokenControlHandle,
8683    )> {
8684        if let BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup {
8685            payload,
8686            control_handle,
8687        } = self
8688        {
8689            Some((payload, control_handle))
8690        } else {
8691            None
8692        }
8693    }
8694
8695    /// Name of the method defined in FIDL
8696    pub fn method_name(&self) -> &'static str {
8697        match *self {
8698            BufferCollectionTokenRequest::Sync { .. } => "sync",
8699            BufferCollectionTokenRequest::Release { .. } => "release",
8700            BufferCollectionTokenRequest::SetName { .. } => "set_name",
8701            BufferCollectionTokenRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
8702            BufferCollectionTokenRequest::SetDebugTimeoutLogDeadline { .. } => {
8703                "set_debug_timeout_log_deadline"
8704            }
8705            BufferCollectionTokenRequest::SetVerboseLogging { .. } => "set_verbose_logging",
8706            BufferCollectionTokenRequest::GetNodeRef { .. } => "get_node_ref",
8707            BufferCollectionTokenRequest::IsAlternateFor { .. } => "is_alternate_for",
8708            BufferCollectionTokenRequest::GetBufferCollectionId { .. } => {
8709                "get_buffer_collection_id"
8710            }
8711            BufferCollectionTokenRequest::SetWeak { .. } => "set_weak",
8712            BufferCollectionTokenRequest::SetWeakOk { .. } => "set_weak_ok",
8713            BufferCollectionTokenRequest::AttachNodeTracking { .. } => "attach_node_tracking",
8714            BufferCollectionTokenRequest::DuplicateSync { .. } => "duplicate_sync",
8715            BufferCollectionTokenRequest::Duplicate { .. } => "duplicate",
8716            BufferCollectionTokenRequest::SetDispensable { .. } => "set_dispensable",
8717            BufferCollectionTokenRequest::CreateBufferCollectionTokenGroup { .. } => {
8718                "create_buffer_collection_token_group"
8719            }
8720            BufferCollectionTokenRequest::_UnknownMethod {
8721                method_type: fidl::MethodType::OneWay,
8722                ..
8723            } => "unknown one-way method",
8724            BufferCollectionTokenRequest::_UnknownMethod {
8725                method_type: fidl::MethodType::TwoWay,
8726                ..
8727            } => "unknown two-way method",
8728        }
8729    }
8730}
8731
/// Cheaply cloneable handle to the server end of a `BufferCollectionToken`
/// channel; used via the [`fidl::endpoints::ControlHandle`] impl below to shut
/// the channel down or observe/signal its closure.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenControlHandle {
    // Shared server-side serve state; `Arc` makes clones of this handle refer
    // to the same underlying channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
8736
impl fidl::endpoints::ControlHandle for BufferCollectionTokenControlHandle {
    // Delegate shutdown to the shared serve state.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Shutdown while delivering `status` to the client as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    // Handle on which the caller can await channel closure.
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raise/clear zircon signals on the peer endpoint; only meaningful (and
    // only compiled) on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
8762
// Intentionally empty: this protocol defines no server-initiated events, so
// the control handle has no inherent send methods beyond the trait impl.
impl BufferCollectionTokenControlHandle {}
8764
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenSyncResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can destroy the
    // handle without running this type's Drop (which would shut the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id echoed back so the client can pair response with request.
    tx_id: u32,
}

/// Set the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8797
8798impl BufferCollectionTokenSyncResponder {
8799    /// Sends a response to the FIDL transaction.
8800    ///
8801    /// Sets the channel to shutdown if an error occurs.
8802    pub fn send(self) -> Result<(), fidl::Error> {
8803        let _result = self.send_raw();
8804        if _result.is_err() {
8805            self.control_handle.shutdown();
8806        }
8807        self.drop_without_shutdown();
8808        _result
8809    }
8810
8811    /// Similar to "send" but does not shutdown the channel if an error occurs.
8812    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
8813        let _result = self.send_raw();
8814        self.drop_without_shutdown();
8815        _result
8816    }
8817
8818    fn send_raw(&self) -> Result<(), fidl::Error> {
8819        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
8820            fidl::encoding::Flexible::new(()),
8821            self.tx_id,
8822            0x11ac2555cf575b54,
8823            fidl::encoding::DynamicFlags::FLEXIBLE,
8824        )
8825    }
8826}
8827
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetNodeRefResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can destroy the
    // handle without running this type's Drop (which would shut the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id echoed back so the client can pair response with request.
    tx_id: u32,
}

/// Set the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8860
8861impl BufferCollectionTokenGetNodeRefResponder {
8862    /// Sends a response to the FIDL transaction.
8863    ///
8864    /// Sets the channel to shutdown if an error occurs.
8865    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8866        let _result = self.send_raw(payload);
8867        if _result.is_err() {
8868            self.control_handle.shutdown();
8869        }
8870        self.drop_without_shutdown();
8871        _result
8872    }
8873
8874    /// Similar to "send" but does not shutdown the channel if an error occurs.
8875    pub fn send_no_shutdown_on_err(
8876        self,
8877        mut payload: NodeGetNodeRefResponse,
8878    ) -> Result<(), fidl::Error> {
8879        let _result = self.send_raw(payload);
8880        self.drop_without_shutdown();
8881        _result
8882    }
8883
8884    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
8885        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
8886            fidl::encoding::Flexible::new(&mut payload),
8887            self.tx_id,
8888            0x5b3d0e51614df053,
8889            fidl::encoding::DynamicFlags::FLEXIBLE,
8890        )
8891    }
8892}
8893
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenIsAlternateForResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can destroy the
    // handle without running this type's Drop (which would shut the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id echoed back so the client can pair response with request.
    tx_id: u32,
}

/// Set the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
8926
8927impl BufferCollectionTokenIsAlternateForResponder {
8928    /// Sends a response to the FIDL transaction.
8929    ///
8930    /// Sets the channel to shutdown if an error occurs.
8931    pub fn send(
8932        self,
8933        mut result: Result<&NodeIsAlternateForResponse, Error>,
8934    ) -> Result<(), fidl::Error> {
8935        let _result = self.send_raw(result);
8936        if _result.is_err() {
8937            self.control_handle.shutdown();
8938        }
8939        self.drop_without_shutdown();
8940        _result
8941    }
8942
8943    /// Similar to "send" but does not shutdown the channel if an error occurs.
8944    pub fn send_no_shutdown_on_err(
8945        self,
8946        mut result: Result<&NodeIsAlternateForResponse, Error>,
8947    ) -> Result<(), fidl::Error> {
8948        let _result = self.send_raw(result);
8949        self.drop_without_shutdown();
8950        _result
8951    }
8952
8953    fn send_raw(
8954        &self,
8955        mut result: Result<&NodeIsAlternateForResponse, Error>,
8956    ) -> Result<(), fidl::Error> {
8957        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
8958            NodeIsAlternateForResponse,
8959            Error,
8960        >>(
8961            fidl::encoding::FlexibleResult::new(result),
8962            self.tx_id,
8963            0x3a58e00157e0825,
8964            fidl::encoding::DynamicFlags::FLEXIBLE,
8965        )
8966    }
8967}
8968
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGetBufferCollectionIdResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can destroy the
    // handle without running this type's Drop (which would shut the channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id echoed back so the client can pair response with request.
    tx_id: u32,
}

/// Set the channel to be shut down (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9001
9002impl BufferCollectionTokenGetBufferCollectionIdResponder {
9003    /// Sends a response to the FIDL transaction.
9004    ///
9005    /// Sets the channel to shutdown if an error occurs.
9006    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9007        let _result = self.send_raw(payload);
9008        if _result.is_err() {
9009            self.control_handle.shutdown();
9010        }
9011        self.drop_without_shutdown();
9012        _result
9013    }
9014
9015    /// Similar to "send" but does not shutdown the channel if an error occurs.
9016    pub fn send_no_shutdown_on_err(
9017        self,
9018        mut payload: &NodeGetBufferCollectionIdResponse,
9019    ) -> Result<(), fidl::Error> {
9020        let _result = self.send_raw(payload);
9021        self.drop_without_shutdown();
9022        _result
9023    }
9024
9025    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
9026        self.control_handle
9027            .inner
9028            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
9029                fidl::encoding::Flexible::new(payload),
9030                self.tx_id,
9031                0x77d19a494b78ba8c,
9032                fidl::encoding::DynamicFlags::FLEXIBLE,
9033            )
9034    }
9035}
9036
/// Responder for a `BufferCollectionToken.DuplicateSync` transaction.
/// Dropping it without replying shuts down the channel (see the `Drop` impl
/// below); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenDuplicateSyncResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown` can release the
    // handle without running this type's `Drop` (which would shut down the
    // channel).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenControlHandle>,
    // Transaction id echoed back to the client in the response message.
    tx_id: u32,
}
9043
/// Set the channel to be shutdown (see [`BufferCollectionTokenControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenDuplicateSyncResponder {
    fn drop(&mut self) {
        // Shut down first, while the control handle is still alive.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
9054
impl fidl::endpoints::Responder for BufferCollectionTokenDuplicateSyncResponder {
    type ControlHandle = BufferCollectionTokenControlHandle;

    /// Borrows the control handle for the channel this transaction arrived on.
    fn control_handle(&self) -> &BufferCollectionTokenControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without shutting down the channel (the client
    /// will keep waiting for a response that never comes).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
9069
9070impl BufferCollectionTokenDuplicateSyncResponder {
9071    /// Sends a response to the FIDL transaction.
9072    ///
9073    /// Sets the channel to shutdown if an error occurs.
9074    pub fn send(
9075        self,
9076        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9077    ) -> Result<(), fidl::Error> {
9078        let _result = self.send_raw(payload);
9079        if _result.is_err() {
9080            self.control_handle.shutdown();
9081        }
9082        self.drop_without_shutdown();
9083        _result
9084    }
9085
9086    /// Similar to "send" but does not shutdown the channel if an error occurs.
9087    pub fn send_no_shutdown_on_err(
9088        self,
9089        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9090    ) -> Result<(), fidl::Error> {
9091        let _result = self.send_raw(payload);
9092        self.drop_without_shutdown();
9093        _result
9094    }
9095
9096    fn send_raw(
9097        &self,
9098        mut payload: BufferCollectionTokenDuplicateSyncResponse,
9099    ) -> Result<(), fidl::Error> {
9100        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
9101            BufferCollectionTokenDuplicateSyncResponse,
9102        >>(
9103            fidl::encoding::Flexible::new(&mut payload),
9104            self.tx_id,
9105            0x1c1af9919d1ca45c,
9106            fidl::encoding::DynamicFlags::FLEXIBLE,
9107        )
9108    }
9109}
9110
/// Zero-sized marker type identifying the `BufferCollectionTokenGroup`
/// protocol to the FIDL endpoint machinery.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct BufferCollectionTokenGroupMarker;
9113
impl fidl::endpoints::ProtocolMarker for BufferCollectionTokenGroupMarker {
    type Proxy = BufferCollectionTokenGroupProxy;
    type RequestStream = BufferCollectionTokenGroupRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = BufferCollectionTokenGroupSynchronousProxy;

    // Name used in error messages and logs. "(anonymous)" presumably means
    // the protocol has no discoverable service name — TODO confirm against
    // the FIDL source.
    const DEBUG_NAME: &'static str = "(anonymous) BufferCollectionTokenGroup";
}
9122
/// Client-side interface for the `BufferCollectionTokenGroup` protocol.
///
/// One-way (fire-and-forget) methods return `Result<(), fidl::Error>`
/// immediately; two-way methods return an associated future type that
/// resolves to the server's response.
pub trait BufferCollectionTokenGroupProxyInterface: Send + Sync {
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
    fn r#create_child(
        &self,
        payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error>;
    type CreateChildrenSyncResponseFut: std::future::Future<
            Output = Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error>,
        > + Send;
    fn r#create_children_sync(
        &self,
        payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut;
    fn r#all_children_present(&self) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `BufferCollectionTokenGroup`
/// protocol; only available when targeting Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct BufferCollectionTokenGroupSynchronousProxy {
    client: fidl::client::sync::Client,
}
9173
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for BufferCollectionTokenGroupSynchronousProxy {
    type Proxy = BufferCollectionTokenGroupProxy;
    type Protocol = BufferCollectionTokenGroupMarker;

    /// Wraps a raw Zircon channel in a synchronous proxy.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    /// Recovers the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Borrows the underlying channel.
    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
9191
9192#[cfg(target_os = "fuchsia")]
9193impl BufferCollectionTokenGroupSynchronousProxy {
9194    pub fn new(channel: fidl::Channel) -> Self {
9195        let protocol_name =
9196            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9197        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
9198    }
9199
9200    pub fn into_channel(self) -> fidl::Channel {
9201        self.client.into_channel()
9202    }
9203
9204    /// Waits until an event arrives and returns it. It is safe for other
9205    /// threads to make concurrent requests while waiting for an event.
9206    pub fn wait_for_event(
9207        &self,
9208        deadline: zx::MonotonicInstant,
9209    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
9210        BufferCollectionTokenGroupEvent::decode(self.client.wait_for_event(deadline)?)
9211    }
9212
9213    /// Ensure that previous messages have been received server side. This is
9214    /// particularly useful after previous messages that created new tokens,
9215    /// because a token must be known to the sysmem server before sending the
9216    /// token to another participant.
9217    ///
9218    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9219    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9220    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9221    /// to mitigate the possibility of a hostile/fake
9222    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9223    /// Another way is to pass the token to
9224    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
9225    /// the token as part of exchanging it for a
9226    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9227    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9228    /// of stalling.
9229    ///
9230    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9231    /// and then starting and completing a `Sync`, it's then safe to send the
9232    /// `BufferCollectionToken` client ends to other participants knowing the
9233    /// server will recognize the tokens when they're sent by the other
9234    /// participants to sysmem in a
9235    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9236    /// efficient way to create tokens while avoiding unnecessary round trips.
9237    ///
9238    /// Other options include waiting for each
9239    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9240    /// individually (using separate call to `Sync` after each), or calling
9241    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9242    /// converted to a `BufferCollection` via
9243    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9244    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9245    /// the sync step and can create multiple tokens at once.
9246    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
9247        let _response = self.client.send_query::<
9248            fidl::encoding::EmptyPayload,
9249            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
9250        >(
9251            (),
9252            0x11ac2555cf575b54,
9253            fidl::encoding::DynamicFlags::FLEXIBLE,
9254            ___deadline,
9255        )?
9256        .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
9257        Ok(_response)
9258    }
9259
9260    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9261    ///
9262    /// Normally a participant will convert a `BufferCollectionToken` into a
9263    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9264    /// `Release` via the token (and then close the channel immediately or
9265    /// shortly later in response to server closing the server end), which
9266    /// avoids causing buffer collection failure. Without a prior `Release`,
9267    /// closing the `BufferCollectionToken` client end will cause buffer
9268    /// collection failure.
9269    ///
9270    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9271    ///
9272    /// By default the server handles unexpected closure of a
9273    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9274    /// first) by failing the buffer collection. Partly this is to expedite
9275    /// closing VMO handles to reclaim memory when any participant fails. If a
9276    /// participant would like to cleanly close a `BufferCollection` without
9277    /// causing buffer collection failure, the participant can send `Release`
9278    /// before closing the `BufferCollection` client end. The `Release` can
9279    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9280    /// buffer collection won't require constraints from this node in order to
9281    /// allocate. If after `SetConstraints`, the constraints are retained and
9282    /// aggregated, despite the lack of `BufferCollection` connection at the
9283    /// time of constraints aggregation.
9284    ///
9285    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9286    ///
9287    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9288    /// end (without `Release` first) will trigger failure of the buffer
9289    /// collection. To close a `BufferCollectionTokenGroup` channel without
9290    /// failing the buffer collection, ensure that AllChildrenPresent() has been
9291    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9292    /// client end.
9293    ///
9294    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
9296    /// buffer collection will fail (triggered by reception of `Release` without
9297    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9298    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9299    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9300    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9301    /// close requires `AllChildrenPresent` (if not already sent), then
9302    /// `Release`, then close client end.
9303    ///
9304    /// If `Release` occurs after `AllChildrenPresent`, the children and all
9305    /// their constraints remain intact (just as they would if the
9306    /// `BufferCollectionTokenGroup` channel had remained open), and the client
9307    /// end close doesn't trigger buffer collection failure.
9308    ///
9309    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9310    ///
9311    /// For brevity, the per-channel-protocol paragraphs above ignore the
9312    /// separate failure domain created by
9313    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9314    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
9315    /// unexpectedly closes (without `Release` first) and that client end is
9316    /// under a failure domain, instead of failing the whole buffer collection,
9317    /// the failure domain is failed, but the buffer collection itself is
9318    /// isolated from failure of the failure domain. Such failure domains can be
9319    /// nested, in which case only the inner-most failure domain in which the
9320    /// `Node` resides fails.
9321    pub fn r#release(&self) -> Result<(), fidl::Error> {
9322        self.client.send::<fidl::encoding::EmptyPayload>(
9323            (),
9324            0x6a5cae7d6d6e04c6,
9325            fidl::encoding::DynamicFlags::FLEXIBLE,
9326        )
9327    }
9328
9329    /// Set a name for VMOs in this buffer collection.
9330    ///
9331    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
9332    /// will be truncated to fit. The name of the vmo will be suffixed with the
9333    /// buffer index within the collection (if the suffix fits within
9334    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
9335    /// listed in the inspect data.
9336    ///
9337    /// The name only affects VMOs allocated after the name is set; this call
9338    /// does not rename existing VMOs. If multiple clients set different names
9339    /// then the larger priority value will win. Setting a new name with the
9340    /// same priority as a prior name doesn't change the name.
9341    ///
9342    /// All table fields are currently required.
9343    ///
9344    /// + request `priority` The name is only set if this is the first `SetName`
9345    ///   or if `priority` is greater than any previous `priority` value in
9346    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
9347    /// + request `name` The name for VMOs created under this buffer collection.
9348    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
9349        self.client.send::<NodeSetNameRequest>(
9350            payload,
9351            0xb41f1624f48c1e9,
9352            fidl::encoding::DynamicFlags::FLEXIBLE,
9353        )
9354    }
9355
9356    /// Set information about the current client that can be used by sysmem to
9357    /// help diagnose leaking memory and allocation stalls waiting for a
9358    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
9359    ///
9360    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
9361    /// `Node`(s) derived from this `Node`, unless overriden by
9362    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
9363    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
9364    ///
9365    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
9366    /// `Allocator` is the most efficient way to ensure that all
9367    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
9368    /// set, and is also more efficient than separately sending the same debug
9369    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
9370    /// created [`fuchsia.sysmem2/Node`].
9371    ///
9372    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
9373    /// indicate which client is closing their channel first, leading to subtree
9374    /// failure (which can be normal if the purpose of the subtree is over, but
9375    /// if happening earlier than expected, the client-channel-specific name can
9376    /// help diagnose where the failure is first coming from, from sysmem's
9377    /// point of view).
9378    ///
9379    /// All table fields are currently required.
9380    ///
9381    /// + request `name` This can be an arbitrary string, but the current
9382    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
9383    /// + request `id` This can be an arbitrary id, but the current process ID
9384    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
9385    pub fn r#set_debug_client_info(
9386        &self,
9387        mut payload: &NodeSetDebugClientInfoRequest,
9388    ) -> Result<(), fidl::Error> {
9389        self.client.send::<NodeSetDebugClientInfoRequest>(
9390            payload,
9391            0x5cde8914608d99b1,
9392            fidl::encoding::DynamicFlags::FLEXIBLE,
9393        )
9394    }
9395
9396    /// Sysmem logs a warning if sysmem hasn't seen
9397    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
9398    /// within 5 seconds after creation of a new collection.
9399    ///
9400    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
9402    /// take effect.
9403    ///
9404    /// In most cases the default works well.
9405    ///
9406    /// All table fields are currently required.
9407    ///
9408    /// + request `deadline` The time at which sysmem will start trying to log
9409    ///   the warning, unless all constraints are with sysmem by then.
9410    pub fn r#set_debug_timeout_log_deadline(
9411        &self,
9412        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
9413    ) -> Result<(), fidl::Error> {
9414        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
9415            payload,
9416            0x716b0af13d5c0806,
9417            fidl::encoding::DynamicFlags::FLEXIBLE,
9418        )
9419    }
9420
9421    /// This enables verbose logging for the buffer collection.
9422    ///
9423    /// Verbose logging includes constraints set via
9424    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
9425    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
9426    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
9427    /// the tree of `Node`(s).
9428    ///
9429    /// Normally sysmem prints only a single line complaint when aggregation
9430    /// fails, with just the specific detailed reason that aggregation failed,
9431    /// with little surrounding context.  While this is often enough to diagnose
9432    /// a problem if only a small change was made and everything was working
9433    /// before the small change, it's often not particularly helpful for getting
9434    /// a new buffer collection to work for the first time.  Especially with
9435    /// more complex trees of nodes, involving things like
9436    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
9437    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
9438    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
9439    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
9440    /// looks like and why it's failing a logical allocation, or why a tree or
9441    /// subtree is failing sooner than expected.
9442    ///
9443    /// The intent of the extra logging is to be acceptable from a performance
9444    /// point of view, under the assumption that verbose logging is only enabled
9445    /// on a low number of buffer collections. If we're not tracking down a bug,
9446    /// we shouldn't send this message.
9447    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
9448        self.client.send::<fidl::encoding::EmptyPayload>(
9449            (),
9450            0x5209c77415b4dfad,
9451            fidl::encoding::DynamicFlags::FLEXIBLE,
9452        )
9453    }
9454
9455    /// This gets a handle that can be used as a parameter to
9456    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
9457    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
9458    /// client obtained this handle from this `Node`.
9459    ///
9460    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
9461    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
9462    /// despite the two calls typically being on different channels.
9463    ///
9464    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
9465    ///
9466    /// All table fields are currently required.
9467    ///
9468    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
9469    ///   different `Node` channel, to prove that the client obtained the handle
9470    ///   from this `Node`.
9471    pub fn r#get_node_ref(
9472        &self,
9473        ___deadline: zx::MonotonicInstant,
9474    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
9475        let _response = self.client.send_query::<
9476            fidl::encoding::EmptyPayload,
9477            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
9478        >(
9479            (),
9480            0x5b3d0e51614df053,
9481            fidl::encoding::DynamicFlags::FLEXIBLE,
9482            ___deadline,
9483        )?
9484        .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
9485        Ok(_response)
9486    }
9487
9488    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
9489    /// rooted at a different child token of a common parent
9490    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
9491    /// passed-in `node_ref`.
9492    ///
9493    /// This call is for assisting with admission control de-duplication, and
9494    /// with debugging.
9495    ///
9496    /// The `node_ref` must be obtained using
9497    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
9498    ///
9499    /// The `node_ref` can be a duplicated handle; it's not necessary to call
9500    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
9501    ///
9502    /// If a calling token may not actually be a valid token at all due to a
9503    /// potentially hostile/untrusted provider of the token, call
9504    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
9505    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
9506    /// never responds due to a calling token not being a real token (not really
9507    /// talking to sysmem).  Another option is to call
9508    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
9509    /// which also validates the token along with converting it to a
9510    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
9511    ///
9512    /// All table fields are currently required.
9513    ///
9514    /// - response `is_alternate`
9515    ///   - true: The first parent node in common between the calling node and
9516    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
9517    ///     that the calling `Node` and the `node_ref` `Node` will not have both
9518    ///     their constraints apply - rather sysmem will choose one or the other
9519    ///     of the constraints - never both.  This is because only one child of
9520    ///     a `BufferCollectionTokenGroup` is selected during logical
9521    ///     allocation, with only that one child's subtree contributing to
9522    ///     constraints aggregation.
9523    ///   - false: The first parent node in common between the calling `Node`
9524    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
9525    ///     Currently, this means the first parent node in common is a
9526    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
9527    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
9528    ///     `Node` may have both their constraints apply during constraints
9529    ///     aggregation of the logical allocation, if both `Node`(s) are
9530    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
9531    ///     this case, there is no `BufferCollectionTokenGroup` that will
9532    ///     directly prevent the two `Node`(s) from both being selected and
9533    ///     their constraints both aggregated, but even when false, one or both
9534    ///     `Node`(s) may still be eliminated from consideration if one or both
9535    ///     `Node`(s) has a direct or indirect parent
9536    ///     `BufferCollectionTokenGroup` which selects a child subtree other
9537    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
9538    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
9539    ///   associated with the same buffer collection as the calling `Node`.
9540    ///   Another reason for this error is if the `node_ref` is an
9541    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
9542    ///   a real `node_ref` obtained from `GetNodeRef`.
9543    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
9545    ///   the needed rights expected on a real `node_ref`.
9546    /// * No other failing status codes are returned by this call.  However,
9547    ///   sysmem may add additional codes in future, so the client should have
9548    ///   sensible default handling for any failing status code.
9549    pub fn r#is_alternate_for(
9550        &self,
9551        mut payload: NodeIsAlternateForRequest,
9552        ___deadline: zx::MonotonicInstant,
9553    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
9554        let _response = self.client.send_query::<
9555            NodeIsAlternateForRequest,
9556            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
9557        >(
9558            &mut payload,
9559            0x3a58e00157e0825,
9560            fidl::encoding::DynamicFlags::FLEXIBLE,
9561            ___deadline,
9562        )?
9563        .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
9564        Ok(_response.map(|x| x))
9565    }
9566
9567    /// Get the buffer collection ID. This ID is also available from
9568    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
9569    /// within the collection).
9570    ///
9571    /// This call is mainly useful in situations where we can't convey a
9572    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
9573    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
9574    /// handle, which can be joined back up with a `BufferCollection` client end
9575    /// that was created via a different path. Prefer to convey a
9576    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
9577    ///
9578    /// Trusting a `buffer_collection_id` value from a source other than sysmem
9579    /// is analogous to trusting a koid value from a source other than zircon.
9580    /// Both should be avoided unless really necessary, and both require
9581    /// caution. In some situations it may be reasonable to refer to a
9582    /// pre-established `BufferCollection` by `buffer_collection_id` via a
9583    /// protocol for efficiency reasons, but an incoming value purporting to be
9584    /// a `buffer_collection_id` is not sufficient alone to justify granting the
9585    /// sender of the `buffer_collection_id` any capability. The sender must
9586    /// first prove to a receiver that the sender has/had a VMO or has/had a
9587    /// `BufferCollectionToken` to the same collection by sending a handle that
9588    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
9589    /// `buffer_collection_id` value. The receiver should take care to avoid
9590    /// assuming that a sender had a `BufferCollectionToken` in cases where the
9591    /// sender has only proven that the sender had a VMO.
9592    ///
9593    /// - response `buffer_collection_id` This ID is unique per buffer
9594    ///   collection per boot. Each buffer is uniquely identified by the
9595    ///   `buffer_collection_id` and `buffer_index` together.
9596    pub fn r#get_buffer_collection_id(
9597        &self,
9598        ___deadline: zx::MonotonicInstant,
9599    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
9600        let _response = self.client.send_query::<
9601            fidl::encoding::EmptyPayload,
9602            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
9603        >(
9604            (),
9605            0x77d19a494b78ba8c,
9606            fidl::encoding::DynamicFlags::FLEXIBLE,
9607            ___deadline,
9608        )?
9609        .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
9610        Ok(_response)
9611    }
9612
9613    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
9614    /// created after this message to weak, which means that a client's `Node`
9615    /// client end (or a child created after this message) is not alone
9616    /// sufficient to keep allocated VMOs alive.
9617    ///
9618    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
9619    /// `close_weak_asap`.
9620    ///
9621    /// This message is only permitted before the `Node` becomes ready for
9622    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
9623    ///   * `BufferCollectionToken`: any time
9624    ///   * `BufferCollection`: before `SetConstraints`
9625    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
9626    ///
9627    /// Currently, no conversion from strong `Node` to weak `Node` after ready
9628    /// for allocation is provided, but a client can simulate that by creating
9629    /// an additional `Node` before allocation and setting that additional
9630    /// `Node` to weak, and then potentially at some point later sending
9631    /// `Release` and closing the client end of the client's strong `Node`, but
9632    /// keeping the client's weak `Node`.
9633    ///
9634    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
9635    /// collection failure (all `Node` client end(s) will see
9636    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
9637    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
9638    /// this situation until all `Node`(s) are ready for allocation. For initial
9639    /// allocation to succeed, at least one strong `Node` is required to exist
9640    /// at allocation time, but after that client receives VMO handles, that
9641    /// client can `BufferCollection.Release` and close the client end without
9642    /// causing this type of failure.
9643    ///
9644    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
9645    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
9646    /// separately as appropriate.
9647    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
9648        self.client.send::<fidl::encoding::EmptyPayload>(
9649            (),
9650            0x22dd3ea514eeffe1,
9651            fidl::encoding::DynamicFlags::FLEXIBLE,
9652        )
9653    }
9654
9655    /// This indicates to sysmem that the client is prepared to pay attention to
9656    /// `close_weak_asap`.
9657    ///
9658    /// If sent, this message must be before
9659    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
9660    ///
9661    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
9662    /// send this message before `WaitForAllBuffersAllocated`, or a parent
9663    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
9664    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
9665    /// trigger buffer collection failure.
9666    ///
9667    /// This message is necessary because weak sysmem VMOs have not always been
9668    /// a thing, so older clients are not aware of the need to pay attention to
9669    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
9670    /// sysmem weak VMO handles asap. By having this message and requiring
9671    /// participants to indicate their acceptance of this aspect of the overall
9672    /// protocol, we avoid situations where an older client is delivered a weak
9673    /// VMO without any way for sysmem to get that VMO to close quickly later
9674    /// (and on a per-buffer basis).
9675    ///
9676    /// A participant that doesn't handle `close_weak_asap` and also doesn't
9677    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
9678    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
9679    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
9680    /// same participant has a child/delegate which does retrieve VMOs, that
9681    /// child/delegate will need to send `SetWeakOk` before
9682    /// `WaitForAllBuffersAllocated`.
9683    ///
9684    /// + request `for_child_nodes_also` If present and true, this means direct
9685    ///   child nodes of this node created after this message plus all
9686    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
9687    ///   those nodes. Any child node of this node that was created before this
9688    ///   message is not included. This setting is "sticky" in the sense that a
9689    ///   subsequent `SetWeakOk` without this bool set to true does not reset
9690    ///   the server-side bool. If this creates a problem for a participant, a
9691    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
9692    ///   tokens instead, as appropriate. A participant should only set
9693    ///   `for_child_nodes_also` true if the participant can really promise to
9694    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
9695    ///   weak VMO handles held by participants holding the corresponding child
9696    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
9697    ///   which are using sysmem(1) can be weak, despite the clients of those
9698    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
9699    ///   direct way to find out about `close_weak_asap`. This only applies to
9700    ///   descendents of this `Node` which are using sysmem(1), not to this
9701    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
9702    ///   token, which will fail allocation unless an ancestor of this `Node`
9703    ///   specified `for_child_nodes_also` true.
9704    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
9705        self.client.send::<NodeSetWeakOkRequest>(
9706            &mut payload,
9707            0x38a44fc4d7724be9,
9708            fidl::encoding::DynamicFlags::FLEXIBLE,
9709        )
9710    }
9711
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
9714    /// reservation by a different `Node` via
9715    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
9716    ///
9717    /// The `Node` buffer counts may not be released until the entire tree of
9718    /// `Node`(s) is closed or failed, because
9719    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
9720    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
9721    /// `Node` buffer counts remain reserved until the orphaned node is later
9722    /// cleaned up.
9723    ///
9724    /// If the `Node` exceeds a fairly large number of attached eventpair server
9725    /// ends, a log message will indicate this and the `Node` (and the
9726    /// appropriate) sub-tree will fail.
9727    ///
9728    /// The `server_end` will remain open when
9729    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
9730    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
9731    /// [`fuchsia.sysmem2/BufferCollection`].
9732    ///
9733    /// This message can also be used with a
9734    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
9735    pub fn r#attach_node_tracking(
9736        &self,
9737        mut payload: NodeAttachNodeTrackingRequest,
9738    ) -> Result<(), fidl::Error> {
9739        self.client.send::<NodeAttachNodeTrackingRequest>(
9740            &mut payload,
9741            0x3f22f2a293d3cdac,
9742            fidl::encoding::DynamicFlags::FLEXIBLE,
9743        )
9744    }
9745
9746    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
9747    /// (including its children) will be selected during allocation (or logical
9748    /// allocation).
9749    ///
9750    /// Before passing the client end of this token to
9751    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
9752    /// [`fuchsia.sysmem2/Node.Sync`] after
9753    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
9754    /// Or the client can use
9755    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
9756    /// essentially includes the `Sync`.
9757    ///
9758    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9759    /// fail the group's subtree and close the connection.
9760    ///
9761    /// After all children have been created, send AllChildrenPresent.
9762    ///
9763    /// + request `token_request` The server end of the new token channel.
9764    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
9765    ///   token allows the holder to get the same rights to buffers as the
9766    ///   parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
9769    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
9770    ///   causes subtree failure.
9771    pub fn r#create_child(
9772        &self,
9773        mut payload: BufferCollectionTokenGroupCreateChildRequest,
9774    ) -> Result<(), fidl::Error> {
9775        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
9776            &mut payload,
9777            0x41a0075d419f30c5,
9778            fidl::encoding::DynamicFlags::FLEXIBLE,
9779        )
9780    }
9781
9782    /// Create 1 or more child tokens at once, synchronously.  In contrast to
9783    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
9784    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
9785    /// of a returned token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`].
9787    ///
9788    /// The lower-index child tokens are higher priority (attempted sooner) than
9789    /// higher-index child tokens.
9790    ///
9791    /// As per all child tokens, successful aggregation will choose exactly one
9792    /// child among all created children (across all children created across
9793    /// potentially multiple calls to
9794    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
9795    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
9796    ///
9797    /// The maximum permissible total number of children per group, and total
9798    /// number of nodes in an overall tree (from the root) are capped to limits
9799    /// which are not configurable via these protocols.
9800    ///
9801    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
9802    /// this will fail the group's subtree and close the connection.
9803    ///
9804    /// After all children have been created, send AllChildrenPresent.
9805    ///
9806    /// + request `rights_attentuation_masks` The size of the
9807    ///   `rights_attentuation_masks` determines the number of created child
9808    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
9809    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
9810    ///   other value, each 0 bit in the mask attenuates that right.
9811    /// - response `tokens` The created child tokens.
9812    pub fn r#create_children_sync(
9813        &self,
9814        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
9815        ___deadline: zx::MonotonicInstant,
9816    ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
9817        let _response = self.client.send_query::<
9818            BufferCollectionTokenGroupCreateChildrenSyncRequest,
9819            fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
9820        >(
9821            payload,
9822            0x15dea448c536070a,
9823            fidl::encoding::DynamicFlags::FLEXIBLE,
9824            ___deadline,
9825        )?
9826        .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
9827        Ok(_response)
9828    }
9829
9830    /// Indicate that no more children will be created.
9831    ///
9832    /// After creating all children, the client should send
9833    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
9834    /// inform sysmem that no more children will be created, so that sysmem can
9835    /// know when it's ok to start aggregating constraints.
9836    ///
9837    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
9838    /// fail the group's subtree and close the connection.
9839    ///
9840    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
9841    /// after `AllChildrenPresent`, else failure of the group's subtree will be
9842    /// triggered. This is intentionally not analogous to how `Release` without
9843    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
9844    /// subtree failure.
9845    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
9846        self.client.send::<fidl::encoding::EmptyPayload>(
9847            (),
9848            0x5c327e4a23391312,
9849            fidl::encoding::DynamicFlags::FLEXIBLE,
9850        )
9851    }
9852}
9853
#[cfg(target_os = "fuchsia")]
impl From<BufferCollectionTokenGroupSynchronousProxy> for zx::Handle {
    /// Consumes the synchronous proxy and yields its channel as a generic handle.
    fn from(value: BufferCollectionTokenGroupSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
9860
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for BufferCollectionTokenGroupSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        BufferCollectionTokenGroupSynchronousProxy::new(value)
    }
}
9867
/// Asynchronous client proxy for the `fuchsia.sysmem2/BufferCollectionTokenGroup`
/// protocol (see the `fidl::endpoints::Proxy` impl and method impls below).
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupProxy {
    // Async FIDL client over which all protocol messages are sent/received.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
9872
9873impl fidl::endpoints::Proxy for BufferCollectionTokenGroupProxy {
9874    type Protocol = BufferCollectionTokenGroupMarker;
9875
9876    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
9877        Self::new(inner)
9878    }
9879
9880    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
9881        self.client.into_channel().map_err(|client| Self { client })
9882    }
9883
9884    fn as_channel(&self) -> &::fidl::AsyncChannel {
9885        self.client.as_channel()
9886    }
9887}
9888
9889impl BufferCollectionTokenGroupProxy {
9890    /// Create a new Proxy for fuchsia.sysmem2/BufferCollectionTokenGroup.
9891    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
9892        let protocol_name =
9893            <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
9894        Self { client: fidl::client::Client::new(channel, protocol_name) }
9895    }
9896
9897    /// Get a Stream of events from the remote end of the protocol.
9898    ///
9899    /// # Panics
9900    ///
9901    /// Panics if the event stream was already taken.
9902    pub fn take_event_stream(&self) -> BufferCollectionTokenGroupEventStream {
9903        BufferCollectionTokenGroupEventStream { event_receiver: self.client.take_event_receiver() }
9904    }
9905
9906    /// Ensure that previous messages have been received server side. This is
9907    /// particularly useful after previous messages that created new tokens,
9908    /// because a token must be known to the sysmem server before sending the
9909    /// token to another participant.
9910    ///
9911    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
9912    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
9913    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
9914    /// to mitigate the possibility of a hostile/fake
9915    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
9916    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
9918    /// the token as part of exchanging it for a
9919    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
9920    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
9921    /// of stalling.
9922    ///
9923    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
9924    /// and then starting and completing a `Sync`, it's then safe to send the
9925    /// `BufferCollectionToken` client ends to other participants knowing the
9926    /// server will recognize the tokens when they're sent by the other
9927    /// participants to sysmem in a
9928    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
9929    /// efficient way to create tokens while avoiding unnecessary round trips.
9930    ///
9931    /// Other options include waiting for each
9932    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
9933    /// individually (using separate call to `Sync` after each), or calling
9934    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
9935    /// converted to a `BufferCollection` via
9936    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
9937    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
9938    /// the sync step and can create multiple tokens at once.
9939    pub fn r#sync(
9940        &self,
9941    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
9942        BufferCollectionTokenGroupProxyInterface::r#sync(self)
9943    }
9944
9945    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
9946    ///
9947    /// Normally a participant will convert a `BufferCollectionToken` into a
9948    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
9949    /// `Release` via the token (and then close the channel immediately or
9950    /// shortly later in response to server closing the server end), which
9951    /// avoids causing buffer collection failure. Without a prior `Release`,
9952    /// closing the `BufferCollectionToken` client end will cause buffer
9953    /// collection failure.
9954    ///
9955    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
9956    ///
9957    /// By default the server handles unexpected closure of a
9958    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
9959    /// first) by failing the buffer collection. Partly this is to expedite
9960    /// closing VMO handles to reclaim memory when any participant fails. If a
9961    /// participant would like to cleanly close a `BufferCollection` without
9962    /// causing buffer collection failure, the participant can send `Release`
9963    /// before closing the `BufferCollection` client end. The `Release` can
9964    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
9965    /// buffer collection won't require constraints from this node in order to
9966    /// allocate. If after `SetConstraints`, the constraints are retained and
9967    /// aggregated, despite the lack of `BufferCollection` connection at the
9968    /// time of constraints aggregation.
9969    ///
9970    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
9971    ///
9972    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
9973    /// end (without `Release` first) will trigger failure of the buffer
9974    /// collection. To close a `BufferCollectionTokenGroup` channel without
9975    /// failing the buffer collection, ensure that AllChildrenPresent() has been
9976    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
9977    /// client end.
9978    ///
9979    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
9981    /// buffer collection will fail (triggered by reception of `Release` without
9982    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
9983    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
9984    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
9985    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
9986    /// close requires `AllChildrenPresent` (if not already sent), then
9987    /// `Release`, then close client end.
9988    ///
9989    /// If `Release` occurs after `AllChildrenPresent`, the children and all
9990    /// their constraints remain intact (just as they would if the
9991    /// `BufferCollectionTokenGroup` channel had remained open), and the client
9992    /// end close doesn't trigger buffer collection failure.
9993    ///
9994    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
9995    ///
9996    /// For brevity, the per-channel-protocol paragraphs above ignore the
9997    /// separate failure domain created by
9998    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
9999    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
10000    /// unexpectedly closes (without `Release` first) and that client end is
10001    /// under a failure domain, instead of failing the whole buffer collection,
10002    /// the failure domain is failed, but the buffer collection itself is
10003    /// isolated from failure of the failure domain. Such failure domains can be
10004    /// nested, in which case only the inner-most failure domain in which the
10005    /// `Node` resides fails.
10006    pub fn r#release(&self) -> Result<(), fidl::Error> {
10007        BufferCollectionTokenGroupProxyInterface::r#release(self)
10008    }
10009
10010    /// Set a name for VMOs in this buffer collection.
10011    ///
10012    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
10013    /// will be truncated to fit. The name of the vmo will be suffixed with the
10014    /// buffer index within the collection (if the suffix fits within
10015    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
10016    /// listed in the inspect data.
10017    ///
10018    /// The name only affects VMOs allocated after the name is set; this call
10019    /// does not rename existing VMOs. If multiple clients set different names
10020    /// then the larger priority value will win. Setting a new name with the
10021    /// same priority as a prior name doesn't change the name.
10022    ///
10023    /// All table fields are currently required.
10024    ///
10025    /// + request `priority` The name is only set if this is the first `SetName`
10026    ///   or if `priority` is greater than any previous `priority` value in
10027    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
10028    /// + request `name` The name for VMOs created under this buffer collection.
10029    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
10030        BufferCollectionTokenGroupProxyInterface::r#set_name(self, payload)
10031    }
10032
10033    /// Set information about the current client that can be used by sysmem to
10034    /// help diagnose leaking memory and allocation stalls waiting for a
10035    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
10036    ///
10037    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
10039    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
10040    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
10041    ///
10042    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
10043    /// `Allocator` is the most efficient way to ensure that all
10044    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
10045    /// set, and is also more efficient than separately sending the same debug
10046    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
10047    /// created [`fuchsia.sysmem2/Node`].
10048    ///
10049    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
10050    /// indicate which client is closing their channel first, leading to subtree
10051    /// failure (which can be normal if the purpose of the subtree is over, but
10052    /// if happening earlier than expected, the client-channel-specific name can
10053    /// help diagnose where the failure is first coming from, from sysmem's
10054    /// point of view).
10055    ///
10056    /// All table fields are currently required.
10057    ///
10058    /// + request `name` This can be an arbitrary string, but the current
10059    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
10060    /// + request `id` This can be an arbitrary id, but the current process ID
10061    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
10062    pub fn r#set_debug_client_info(
10063        &self,
10064        mut payload: &NodeSetDebugClientInfoRequest,
10065    ) -> Result<(), fidl::Error> {
10066        BufferCollectionTokenGroupProxyInterface::r#set_debug_client_info(self, payload)
10067    }
10068
10069    /// Sysmem logs a warning if sysmem hasn't seen
10070    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
10071    /// within 5 seconds after creation of a new collection.
10072    ///
10073    /// Clients can call this method to change when the log is printed. If
10074    /// multiple client set the deadline, it's unspecified which deadline will
10075    /// take effect.
10076    ///
10077    /// In most cases the default works well.
10078    ///
10079    /// All table fields are currently required.
10080    ///
10081    /// + request `deadline` The time at which sysmem will start trying to log
10082    ///   the warning, unless all constraints are with sysmem by then.
10083    pub fn r#set_debug_timeout_log_deadline(
10084        &self,
10085        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
10086    ) -> Result<(), fidl::Error> {
10087        BufferCollectionTokenGroupProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
10088    }
10089
10090    /// This enables verbose logging for the buffer collection.
10091    ///
10092    /// Verbose logging includes constraints set via
10093    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
10094    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
10095    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
10096    /// the tree of `Node`(s).
10097    ///
10098    /// Normally sysmem prints only a single line complaint when aggregation
10099    /// fails, with just the specific detailed reason that aggregation failed,
10100    /// with little surrounding context.  While this is often enough to diagnose
10101    /// a problem if only a small change was made and everything was working
10102    /// before the small change, it's often not particularly helpful for getting
10103    /// a new buffer collection to work for the first time.  Especially with
10104    /// more complex trees of nodes, involving things like
10105    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
10106    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
10107    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
10108    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
10109    /// looks like and why it's failing a logical allocation, or why a tree or
10110    /// subtree is failing sooner than expected.
10111    ///
10112    /// The intent of the extra logging is to be acceptable from a performance
10113    /// point of view, under the assumption that verbose logging is only enabled
10114    /// on a low number of buffer collections. If we're not tracking down a bug,
10115    /// we shouldn't send this message.
10116    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
10117        BufferCollectionTokenGroupProxyInterface::r#set_verbose_logging(self)
10118    }
10119
10120    /// This gets a handle that can be used as a parameter to
10121    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
10122    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
10123    /// client obtained this handle from this `Node`.
10124    ///
10125    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
10126    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
10127    /// despite the two calls typically being on different channels.
10128    ///
10129    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
10130    ///
10131    /// All table fields are currently required.
10132    ///
10133    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
10134    ///   different `Node` channel, to prove that the client obtained the handle
10135    ///   from this `Node`.
10136    pub fn r#get_node_ref(
10137        &self,
10138    ) -> fidl::client::QueryResponseFut<
10139        NodeGetNodeRefResponse,
10140        fidl::encoding::DefaultFuchsiaResourceDialect,
10141    > {
10142        BufferCollectionTokenGroupProxyInterface::r#get_node_ref(self)
10143    }
10144
10145    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
10146    /// rooted at a different child token of a common parent
10147    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
10148    /// passed-in `node_ref`.
10149    ///
10150    /// This call is for assisting with admission control de-duplication, and
10151    /// with debugging.
10152    ///
10153    /// The `node_ref` must be obtained using
10154    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
10155    ///
10156    /// The `node_ref` can be a duplicated handle; it's not necessary to call
10157    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
10158    ///
10159    /// If a calling token may not actually be a valid token at all due to a
10160    /// potentially hostile/untrusted provider of the token, call
10161    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
10162    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
10163    /// never responds due to a calling token not being a real token (not really
10164    /// talking to sysmem).  Another option is to call
10165    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
10166    /// which also validates the token along with converting it to a
10167    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
10168    ///
10169    /// All table fields are currently required.
10170    ///
10171    /// - response `is_alternate`
10172    ///   - true: The first parent node in common between the calling node and
10173    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both.  This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call.  However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    pub fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        BufferCollectionTokenGroupProxyInterface::r#is_alternate_for(self, payload)
    }
10215
10216    /// Get the buffer collection ID. This ID is also available from
10217    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
10218    /// within the collection).
10219    ///
10220    /// This call is mainly useful in situations where we can't convey a
10221    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
10222    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
10223    /// handle, which can be joined back up with a `BufferCollection` client end
10224    /// that was created via a different path. Prefer to convey a
10225    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
10226    ///
10227    /// Trusting a `buffer_collection_id` value from a source other than sysmem
10228    /// is analogous to trusting a koid value from a source other than zircon.
10229    /// Both should be avoided unless really necessary, and both require
10230    /// caution. In some situations it may be reasonable to refer to a
10231    /// pre-established `BufferCollection` by `buffer_collection_id` via a
10232    /// protocol for efficiency reasons, but an incoming value purporting to be
10233    /// a `buffer_collection_id` is not sufficient alone to justify granting the
10234    /// sender of the `buffer_collection_id` any capability. The sender must
10235    /// first prove to a receiver that the sender has/had a VMO or has/had a
10236    /// `BufferCollectionToken` to the same collection by sending a handle that
10237    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
10238    /// `buffer_collection_id` value. The receiver should take care to avoid
10239    /// assuming that a sender had a `BufferCollectionToken` in cases where the
10240    /// sender has only proven that the sender had a VMO.
10241    ///
10242    /// - response `buffer_collection_id` This ID is unique per buffer
10243    ///   collection per boot. Each buffer is uniquely identified by the
10244    ///   `buffer_collection_id` and `buffer_index` together.
10245    pub fn r#get_buffer_collection_id(
10246        &self,
10247    ) -> fidl::client::QueryResponseFut<
10248        NodeGetBufferCollectionIdResponse,
10249        fidl::encoding::DefaultFuchsiaResourceDialect,
10250    > {
10251        BufferCollectionTokenGroupProxyInterface::r#get_buffer_collection_id(self)
10252    }
10253
10254    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
10255    /// created after this message to weak, which means that a client's `Node`
10256    /// client end (or a child created after this message) is not alone
10257    /// sufficient to keep allocated VMOs alive.
10258    ///
10259    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
10260    /// `close_weak_asap`.
10261    ///
10262    /// This message is only permitted before the `Node` becomes ready for
10263    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
10264    ///   * `BufferCollectionToken`: any time
10265    ///   * `BufferCollection`: before `SetConstraints`
10266    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
10267    ///
10268    /// Currently, no conversion from strong `Node` to weak `Node` after ready
10269    /// for allocation is provided, but a client can simulate that by creating
10270    /// an additional `Node` before allocation and setting that additional
10271    /// `Node` to weak, and then potentially at some point later sending
10272    /// `Release` and closing the client end of the client's strong `Node`, but
10273    /// keeping the client's weak `Node`.
10274    ///
10275    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
10276    /// collection failure (all `Node` client end(s) will see
10277    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
10278    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
10279    /// this situation until all `Node`(s) are ready for allocation. For initial
10280    /// allocation to succeed, at least one strong `Node` is required to exist
10281    /// at allocation time, but after that client receives VMO handles, that
10282    /// client can `BufferCollection.Release` and close the client end without
10283    /// causing this type of failure.
10284    ///
10285    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
10286    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
10287    /// separately as appropriate.
10288    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
10289        BufferCollectionTokenGroupProxyInterface::r#set_weak(self)
10290    }
10291
10292    /// This indicates to sysmem that the client is prepared to pay attention to
10293    /// `close_weak_asap`.
10294    ///
10295    /// If sent, this message must be before
10296    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
10297    ///
10298    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
10299    /// send this message before `WaitForAllBuffersAllocated`, or a parent
10300    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
10301    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
10302    /// trigger buffer collection failure.
10303    ///
10304    /// This message is necessary because weak sysmem VMOs have not always been
10305    /// a thing, so older clients are not aware of the need to pay attention to
10306    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
10307    /// sysmem weak VMO handles asap. By having this message and requiring
10308    /// participants to indicate their acceptance of this aspect of the overall
10309    /// protocol, we avoid situations where an older client is delivered a weak
10310    /// VMO without any way for sysmem to get that VMO to close quickly later
10311    /// (and on a per-buffer basis).
10312    ///
10313    /// A participant that doesn't handle `close_weak_asap` and also doesn't
10314    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
10315    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
10316    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
10317    /// same participant has a child/delegate which does retrieve VMOs, that
10318    /// child/delegate will need to send `SetWeakOk` before
10319    /// `WaitForAllBuffersAllocated`.
10320    ///
10321    /// + request `for_child_nodes_also` If present and true, this means direct
10322    ///   child nodes of this node created after this message plus all
10323    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
10324    ///   those nodes. Any child node of this node that was created before this
10325    ///   message is not included. This setting is "sticky" in the sense that a
10326    ///   subsequent `SetWeakOk` without this bool set to true does not reset
10327    ///   the server-side bool. If this creates a problem for a participant, a
10328    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
10329    ///   tokens instead, as appropriate. A participant should only set
10330    ///   `for_child_nodes_also` true if the participant can really promise to
10331    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
10332    ///   weak VMO handles held by participants holding the corresponding child
10333    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
10334    ///   which are using sysmem(1) can be weak, despite the clients of those
10335    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
10336    ///   direct way to find out about `close_weak_asap`. This only applies to
10337    ///   descendents of this `Node` which are using sysmem(1), not to this
10338    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
10339    ///   token, which will fail allocation unless an ancestor of this `Node`
10340    ///   specified `for_child_nodes_also` true.
10341    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
10342        BufferCollectionTokenGroupProxyInterface::r#set_weak_ok(self, payload)
10343    }
10344
10345    /// The server_end will be closed after this `Node` and any child nodes have
10346    /// have released their buffer counts, making those counts available for
10347    /// reservation by a different `Node` via
10348    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
10349    ///
10350    /// The `Node` buffer counts may not be released until the entire tree of
10351    /// `Node`(s) is closed or failed, because
10352    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
10353    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
10354    /// `Node` buffer counts remain reserved until the orphaned node is later
10355    /// cleaned up.
10356    ///
10357    /// If the `Node` exceeds a fairly large number of attached eventpair server
10358    /// ends, a log message will indicate this and the `Node` (and the
10359    /// appropriate) sub-tree will fail.
10360    ///
10361    /// The `server_end` will remain open when
10362    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
10363    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
10364    /// [`fuchsia.sysmem2/BufferCollection`].
10365    ///
10366    /// This message can also be used with a
10367    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
10368    pub fn r#attach_node_tracking(
10369        &self,
10370        mut payload: NodeAttachNodeTrackingRequest,
10371    ) -> Result<(), fidl::Error> {
10372        BufferCollectionTokenGroupProxyInterface::r#attach_node_tracking(self, payload)
10373    }
10374
10375    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
10376    /// (including its children) will be selected during allocation (or logical
10377    /// allocation).
10378    ///
10379    /// Before passing the client end of this token to
10380    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
10381    /// [`fuchsia.sysmem2/Node.Sync`] after
10382    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
10383    /// Or the client can use
10384    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
10385    /// essentially includes the `Sync`.
10386    ///
10387    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10388    /// fail the group's subtree and close the connection.
10389    ///
10390    /// After all children have been created, send AllChildrenPresent.
10391    ///
10392    /// + request `token_request` The server end of the new token channel.
10393    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
10394    ///   token allows the holder to get the same rights to buffers as the
10395    ///   parent token (of the group) had. When the value isn't
10396    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpretted as a bitmask with 0
10397    ///   bits ensuring those rights are attentuated, so 0xFFFFFFFF is a synonym
10398    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
10399    ///   causes subtree failure.
10400    pub fn r#create_child(
10401        &self,
10402        mut payload: BufferCollectionTokenGroupCreateChildRequest,
10403    ) -> Result<(), fidl::Error> {
10404        BufferCollectionTokenGroupProxyInterface::r#create_child(self, payload)
10405    }
10406
10407    /// Create 1 or more child tokens at once, synchronously.  In contrast to
10408    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
10409    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
10410    /// of a returned token to
10411    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
10412    ///
10413    /// The lower-index child tokens are higher priority (attempted sooner) than
10414    /// higher-index child tokens.
10415    ///
10416    /// As per all child tokens, successful aggregation will choose exactly one
10417    /// child among all created children (across all children created across
10418    /// potentially multiple calls to
10419    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
10420    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
10421    ///
10422    /// The maximum permissible total number of children per group, and total
10423    /// number of nodes in an overall tree (from the root) are capped to limits
10424    /// which are not configurable via these protocols.
10425    ///
10426    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
10427    /// this will fail the group's subtree and close the connection.
10428    ///
10429    /// After all children have been created, send AllChildrenPresent.
10430    ///
10431    /// + request `rights_attentuation_masks` The size of the
10432    ///   `rights_attentuation_masks` determines the number of created child
10433    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
10434    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
10435    ///   other value, each 0 bit in the mask attenuates that right.
10436    /// - response `tokens` The created child tokens.
10437    pub fn r#create_children_sync(
10438        &self,
10439        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
10440    ) -> fidl::client::QueryResponseFut<
10441        BufferCollectionTokenGroupCreateChildrenSyncResponse,
10442        fidl::encoding::DefaultFuchsiaResourceDialect,
10443    > {
10444        BufferCollectionTokenGroupProxyInterface::r#create_children_sync(self, payload)
10445    }
10446
10447    /// Indicate that no more children will be created.
10448    ///
10449    /// After creating all children, the client should send
10450    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
10451    /// inform sysmem that no more children will be created, so that sysmem can
10452    /// know when it's ok to start aggregating constraints.
10453    ///
10454    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
10455    /// fail the group's subtree and close the connection.
10456    ///
10457    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
10458    /// after `AllChildrenPresent`, else failure of the group's subtree will be
10459    /// triggered. This is intentionally not analogous to how `Release` without
10460    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
10461    /// subtree failure.
10462    pub fn r#all_children_present(&self) -> Result<(), fidl::Error> {
10463        BufferCollectionTokenGroupProxyInterface::r#all_children_present(self)
10464    }
10465}
10466
// Client-side wire implementation of the BufferCollectionTokenGroup protocol.
// Each method sends its request with the method's FIDL ordinal (the 64-bit
// hex constants below) and the FLEXIBLE dynamic flag; two-way methods also
// install a `_decode` function that decodes the reply from the transaction
// body. The ordinals are generated by fidlgen and must match the protocol
// definition exactly — do not edit by hand.
impl BufferCollectionTokenGroupProxyInterface for BufferCollectionTokenGroupProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#sync(&self) -> Self::SyncResponseFut {
        // Decodes the empty Sync response.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way (fire-and-forget) method: encodes and sends; no reply expected.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        // Decodes the GetNodeRef response table.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        // Decodes the IsAlternateFor result; this method uses a FIDL error
        // result (FlexibleResultType), hence the Result payload.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        // Decodes the GetBufferCollectionId response table.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // Resource (handle-carrying) payloads are passed by `&mut` to `send` so
    // encoding can take ownership of the contained handles.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    fn r#create_child(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<BufferCollectionTokenGroupCreateChildRequest>(
            &mut payload,
            0x41a0075d419f30c5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type CreateChildrenSyncResponseFut = fidl::client::QueryResponseFut<
        BufferCollectionTokenGroupCreateChildrenSyncResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#create_children_sync(
        &self,
        mut payload: &BufferCollectionTokenGroupCreateChildrenSyncRequest,
    ) -> Self::CreateChildrenSyncResponseFut {
        // Decodes the CreateChildrenSync response table.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<BufferCollectionTokenGroupCreateChildrenSyncResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<BufferCollectionTokenGroupCreateChildrenSyncResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x15dea448c536070a,
            >(_buf?)?
            .into_result::<BufferCollectionTokenGroupMarker>("create_children_sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            BufferCollectionTokenGroupCreateChildrenSyncRequest,
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >(
            payload,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    fn r#all_children_present(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5c327e4a23391312,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
10690
/// Stream of events arriving on a
/// `fuchsia.sysmem2/BufferCollectionTokenGroup` channel.
pub struct BufferCollectionTokenGroupEventStream {
    // Yields raw message buffers from the underlying FIDL client; each is
    // decoded into a `BufferCollectionTokenGroupEvent` by the Stream impl.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
10694
// Marker impl: the event stream can be polled through a plain `&mut`
// reference without structural pinning.
impl std::marker::Unpin for BufferCollectionTokenGroupEventStream {}
10696
impl futures::stream::FusedStream for BufferCollectionTokenGroupEventStream {
    /// Reports termination by delegating to the underlying event receiver.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
10702
10703impl futures::Stream for BufferCollectionTokenGroupEventStream {
10704    type Item = Result<BufferCollectionTokenGroupEvent, fidl::Error>;
10705
10706    fn poll_next(
10707        mut self: std::pin::Pin<&mut Self>,
10708        cx: &mut std::task::Context<'_>,
10709    ) -> std::task::Poll<Option<Self::Item>> {
10710        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
10711            &mut self.event_receiver,
10712            cx
10713        )?) {
10714            Some(buf) => std::task::Poll::Ready(Some(BufferCollectionTokenGroupEvent::decode(buf))),
10715            None => std::task::Poll::Ready(None),
10716        }
10717    }
10718}
10719
/// An event received on a `fuchsia.sysmem2/BufferCollectionTokenGroup`
/// channel. Only unknown (flexible) events are representable here; the
/// decoder maps every flexible ordinal to `_UnknownEvent`.
#[derive(Debug)]
pub enum BufferCollectionTokenGroupEvent {
    // Catch-all for flexible events whose ordinal this binding doesn't know.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
10728
10729impl BufferCollectionTokenGroupEvent {
10730    /// Decodes a message buffer as a [`BufferCollectionTokenGroupEvent`].
10731    fn decode(
10732        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
10733    ) -> Result<BufferCollectionTokenGroupEvent, fidl::Error> {
10734        let (bytes, _handles) = buf.split_mut();
10735        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
10736        debug_assert_eq!(tx_header.tx_id, 0);
10737        match tx_header.ordinal {
10738            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
10739                Ok(BufferCollectionTokenGroupEvent::_UnknownEvent {
10740                    ordinal: tx_header.ordinal,
10741                })
10742            }
10743            _ => Err(fidl::Error::UnknownOrdinal {
10744                ordinal: tx_header.ordinal,
10745                protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
10746            })
10747        }
10748    }
10749}
10750
/// A Stream of incoming requests for fuchsia.sysmem2/BufferCollectionTokenGroup.
pub struct BufferCollectionTokenGroupRequestStream {
    // Shared server-side channel state; also cloned into control handles.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the stream has yielded `None`; the Stream impl panics if
    // polled again after that.
    is_terminated: bool,
}
10756
// Marker impl: the request stream can be polled through a plain `&mut`
// reference without structural pinning.
impl std::marker::Unpin for BufferCollectionTokenGroupRequestStream {}
10758
impl futures::stream::FusedStream for BufferCollectionTokenGroupRequestStream {
    /// Reports whether the stream has already yielded `None`; the Stream
    /// impl panics if polled again once this returns true.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
10764
10765impl fidl::endpoints::RequestStream for BufferCollectionTokenGroupRequestStream {
10766    type Protocol = BufferCollectionTokenGroupMarker;
10767    type ControlHandle = BufferCollectionTokenGroupControlHandle;
10768
10769    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
10770        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
10771    }
10772
10773    fn control_handle(&self) -> Self::ControlHandle {
10774        BufferCollectionTokenGroupControlHandle { inner: self.inner.clone() }
10775    }
10776
10777    fn into_inner(
10778        self,
10779    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
10780    {
10781        (self.inner, self.is_terminated)
10782    }
10783
10784    fn from_inner(
10785        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
10786        is_terminated: bool,
10787    ) -> Self {
10788        Self { inner, is_terminated }
10789    }
10790}
10791
10792impl futures::Stream for BufferCollectionTokenGroupRequestStream {
10793    type Item = Result<BufferCollectionTokenGroupRequest, fidl::Error>;
10794
10795    fn poll_next(
10796        mut self: std::pin::Pin<&mut Self>,
10797        cx: &mut std::task::Context<'_>,
10798    ) -> std::task::Poll<Option<Self::Item>> {
10799        let this = &mut *self;
10800        if this.inner.check_shutdown(cx) {
10801            this.is_terminated = true;
10802            return std::task::Poll::Ready(None);
10803        }
10804        if this.is_terminated {
10805            panic!("polled BufferCollectionTokenGroupRequestStream after completion");
10806        }
10807        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
10808            |bytes, handles| {
10809                match this.inner.channel().read_etc(cx, bytes, handles) {
10810                    std::task::Poll::Ready(Ok(())) => {}
10811                    std::task::Poll::Pending => return std::task::Poll::Pending,
10812                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
10813                        this.is_terminated = true;
10814                        return std::task::Poll::Ready(None);
10815                    }
10816                    std::task::Poll::Ready(Err(e)) => {
10817                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
10818                            e.into(),
10819                        ))))
10820                    }
10821                }
10822
10823                // A message has been received from the channel
10824                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
10825
10826                std::task::Poll::Ready(Some(match header.ordinal {
10827                0x11ac2555cf575b54 => {
10828                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10829                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10830                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10831                    let control_handle = BufferCollectionTokenGroupControlHandle {
10832                        inner: this.inner.clone(),
10833                    };
10834                    Ok(BufferCollectionTokenGroupRequest::Sync {
10835                        responder: BufferCollectionTokenGroupSyncResponder {
10836                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10837                            tx_id: header.tx_id,
10838                        },
10839                    })
10840                }
10841                0x6a5cae7d6d6e04c6 => {
10842                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10843                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10844                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10845                    let control_handle = BufferCollectionTokenGroupControlHandle {
10846                        inner: this.inner.clone(),
10847                    };
10848                    Ok(BufferCollectionTokenGroupRequest::Release {
10849                        control_handle,
10850                    })
10851                }
10852                0xb41f1624f48c1e9 => {
10853                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10854                    let mut req = fidl::new_empty!(NodeSetNameRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10855                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
10856                    let control_handle = BufferCollectionTokenGroupControlHandle {
10857                        inner: this.inner.clone(),
10858                    };
10859                    Ok(BufferCollectionTokenGroupRequest::SetName {payload: req,
10860                        control_handle,
10861                    })
10862                }
10863                0x5cde8914608d99b1 => {
10864                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10865                    let mut req = fidl::new_empty!(NodeSetDebugClientInfoRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10866                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
10867                    let control_handle = BufferCollectionTokenGroupControlHandle {
10868                        inner: this.inner.clone(),
10869                    };
10870                    Ok(BufferCollectionTokenGroupRequest::SetDebugClientInfo {payload: req,
10871                        control_handle,
10872                    })
10873                }
10874                0x716b0af13d5c0806 => {
10875                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10876                    let mut req = fidl::new_empty!(NodeSetDebugTimeoutLogDeadlineRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10877                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
10878                    let control_handle = BufferCollectionTokenGroupControlHandle {
10879                        inner: this.inner.clone(),
10880                    };
10881                    Ok(BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {payload: req,
10882                        control_handle,
10883                    })
10884                }
10885                0x5209c77415b4dfad => {
10886                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10887                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10888                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10889                    let control_handle = BufferCollectionTokenGroupControlHandle {
10890                        inner: this.inner.clone(),
10891                    };
10892                    Ok(BufferCollectionTokenGroupRequest::SetVerboseLogging {
10893                        control_handle,
10894                    })
10895                }
10896                0x5b3d0e51614df053 => {
10897                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10898                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10899                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10900                    let control_handle = BufferCollectionTokenGroupControlHandle {
10901                        inner: this.inner.clone(),
10902                    };
10903                    Ok(BufferCollectionTokenGroupRequest::GetNodeRef {
10904                        responder: BufferCollectionTokenGroupGetNodeRefResponder {
10905                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10906                            tx_id: header.tx_id,
10907                        },
10908                    })
10909                }
10910                0x3a58e00157e0825 => {
10911                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10912                    let mut req = fidl::new_empty!(NodeIsAlternateForRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10913                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
10914                    let control_handle = BufferCollectionTokenGroupControlHandle {
10915                        inner: this.inner.clone(),
10916                    };
10917                    Ok(BufferCollectionTokenGroupRequest::IsAlternateFor {payload: req,
10918                        responder: BufferCollectionTokenGroupIsAlternateForResponder {
10919                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10920                            tx_id: header.tx_id,
10921                        },
10922                    })
10923                }
10924                0x77d19a494b78ba8c => {
10925                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10926                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10927                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10928                    let control_handle = BufferCollectionTokenGroupControlHandle {
10929                        inner: this.inner.clone(),
10930                    };
10931                    Ok(BufferCollectionTokenGroupRequest::GetBufferCollectionId {
10932                        responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder {
10933                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10934                            tx_id: header.tx_id,
10935                        },
10936                    })
10937                }
10938                0x22dd3ea514eeffe1 => {
10939                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10940                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10941                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
10942                    let control_handle = BufferCollectionTokenGroupControlHandle {
10943                        inner: this.inner.clone(),
10944                    };
10945                    Ok(BufferCollectionTokenGroupRequest::SetWeak {
10946                        control_handle,
10947                    })
10948                }
10949                0x38a44fc4d7724be9 => {
10950                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10951                    let mut req = fidl::new_empty!(NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10952                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
10953                    let control_handle = BufferCollectionTokenGroupControlHandle {
10954                        inner: this.inner.clone(),
10955                    };
10956                    Ok(BufferCollectionTokenGroupRequest::SetWeakOk {payload: req,
10957                        control_handle,
10958                    })
10959                }
10960                0x3f22f2a293d3cdac => {
10961                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10962                    let mut req = fidl::new_empty!(NodeAttachNodeTrackingRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10963                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
10964                    let control_handle = BufferCollectionTokenGroupControlHandle {
10965                        inner: this.inner.clone(),
10966                    };
10967                    Ok(BufferCollectionTokenGroupRequest::AttachNodeTracking {payload: req,
10968                        control_handle,
10969                    })
10970                }
10971                0x41a0075d419f30c5 => {
10972                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10973                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10974                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildRequest>(&header, _body_bytes, handles, &mut req)?;
10975                    let control_handle = BufferCollectionTokenGroupControlHandle {
10976                        inner: this.inner.clone(),
10977                    };
10978                    Ok(BufferCollectionTokenGroupRequest::CreateChild {payload: req,
10979                        control_handle,
10980                    })
10981                }
10982                0x15dea448c536070a => {
10983                    header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
10984                    let mut req = fidl::new_empty!(BufferCollectionTokenGroupCreateChildrenSyncRequest, fidl::encoding::DefaultFuchsiaResourceDialect);
10985                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<BufferCollectionTokenGroupCreateChildrenSyncRequest>(&header, _body_bytes, handles, &mut req)?;
10986                    let control_handle = BufferCollectionTokenGroupControlHandle {
10987                        inner: this.inner.clone(),
10988                    };
10989                    Ok(BufferCollectionTokenGroupRequest::CreateChildrenSync {payload: req,
10990                        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder {
10991                            control_handle: std::mem::ManuallyDrop::new(control_handle),
10992                            tx_id: header.tx_id,
10993                        },
10994                    })
10995                }
10996                0x5c327e4a23391312 => {
10997                    header.validate_request_tx_id(fidl::MethodType::OneWay)?;
10998                    let mut req = fidl::new_empty!(fidl::encoding::EmptyPayload, fidl::encoding::DefaultFuchsiaResourceDialect);
10999                    fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
11000                    let control_handle = BufferCollectionTokenGroupControlHandle {
11001                        inner: this.inner.clone(),
11002                    };
11003                    Ok(BufferCollectionTokenGroupRequest::AllChildrenPresent {
11004                        control_handle,
11005                    })
11006                }
11007                _ if header.tx_id == 0 && header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
11008                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
11009                        ordinal: header.ordinal,
11010                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
11011                        method_type: fidl::MethodType::OneWay,
11012                    })
11013                }
11014                _ if header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
11015                    this.inner.send_framework_err(
11016                        fidl::encoding::FrameworkErr::UnknownMethod,
11017                        header.tx_id,
11018                        header.ordinal,
11019                        header.dynamic_flags(),
11020                        (bytes, handles),
11021                    )?;
11022                    Ok(BufferCollectionTokenGroupRequest::_UnknownMethod {
11023                        ordinal: header.ordinal,
11024                        control_handle: BufferCollectionTokenGroupControlHandle { inner: this.inner.clone() },
11025                        method_type: fidl::MethodType::TwoWay,
11026                    })
11027                }
11028                _ => Err(fidl::Error::UnknownOrdinal {
11029                    ordinal: header.ordinal,
11030                    protocol_name: <BufferCollectionTokenGroupMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
11031                }),
11032            }))
11033            },
11034        )
11035    }
11036}
11037
11038/// The sysmem implementation is consistent with a logical / conceptual model of
11039/// allocation / logical allocation as follows:
11040///
11041/// As usual, a logical allocation considers either the root and all nodes with
11042/// connectivity to the root that don't transit a [`fuchsia.sysmem2/Node`]
11043/// created with [`fuchsia.sysmem2/BufferCollection.AttachToken`], or a subtree
11044/// rooted at an `AttachToken` `Node` and all `Node`(s) with connectivity to
11045/// that subtree that don't transit another `AttachToken`.  This is called the
11046/// logical allocation pruned subtree, or pruned subtree for short.
11047///
11048/// During constraints aggregation, each
11049/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] will select a single child
11050/// `Node` among its direct children. The rest of the children will appear to
11051/// fail the logical allocation, while the selected child may succeed.
11052///
11053/// When more than one `BufferCollectionTokenGroup` exists in the overall
11054/// logical allocation pruned subtree, the relative priority between two groups
11055/// is equivalent to their ordering in a DFS pre-order iteration of the tree,
11056/// with parents higher priority than children, and left children higher
11057/// priority than right children.
11058///
11059/// When a particular child of a group is selected (whether provisionally during
11060/// a constraints aggregation attempt, or as a final selection), the
11061/// non-selection of other children of the group will "hide" any other groups
11062/// under those non-selected children.
11063///
11064/// Within a logical allocation, aggregation is attempted first by provisionally
11065/// selecting child 0 of the highest-priority group, and child 0 of the next
11066/// highest-priority group that isn't hidden by the provisional selections so
11067/// far, etc.
11068///
11069/// If that aggregation attempt fails, aggregation will be attempted with the
11070/// ordinal 0 child of all the same groups except the lowest priority non-hidden
11071/// group which will provisionally select its ordinal 1 child (and then child 2
11072/// and so on). If a new lowest-priority group is un-hidden as provisional
11073/// selections are updated, that newly un-hidden lowest-priority group has all
11074/// its children considered in order, before changing the provisional selection
11075/// in the former lowest-priority group. In terms of result, this is equivalent
11076/// to systematic enumeration of all possible combinations of choices in a
11077/// counting-like order updating the lowest-priority group the most often and
11078/// the highest-priority group the least often. Rather than actually attempting
11079/// aggregation with all the combinations, we can skip over combinations which
11080/// are redundant/equivalent due to hiding without any change to the result.
11081///
11082/// Attempted constraint aggregations of enumerated non-equivalent combinations
11083/// of choices continue in this manner until either (a) all aggregation attempts
11084/// fail in which case the overall logical allocation fails, or (b) until an
11085/// attempted aggregation succeeds, in which case buffer allocation (if needed;
11086/// if this is the pruned subtree rooted at the overall root `Node`) is
11087/// attempted once. If buffer allocation based on the first successful
11088/// constraints aggregation fails, the overall logical allocation fails (there
11089/// is no buffer allocation retry / re-attempt). If buffer allocation succeeds
11090/// (or is not needed due to being a pruned subtree that doesn't include the
11091/// root), the logical allocation succeeds.
11092///
11093/// If this prioritization scheme cannot reasonably work for your usage of
11094/// sysmem, please don't hesitate to contact sysmem folks to discuss potentially
11095/// adding a way to achieve what you need.
11096///
11097/// Please avoid creating a large number of `BufferCollectionTokenGroup`(s) per
11098/// logical allocation, especially with large number of children overall, and
11099/// especially in cases where aggregation may reasonably be expected to often
11100/// fail using ordinal 0 children and possibly with later children as well.
11101/// Sysmem mitigates potentially high time complexity of evaluating too many
11102/// child combinations/selections across too many groups by simply failing
11103/// logical allocation beyond a certain (fairly high, but not huge) max number
11104/// of considered group child combinations/selections. More advanced (and more
11105/// complicated) mitigation is not anticipated to be practically necessary or
11106/// worth the added complexity. Please contact sysmem folks if the max limit is
11107/// getting hit or if you anticipate it getting hit, to discuss potential
11108/// options.
11109///
11110/// Prefer to use multiple [`fuchsia.sysmem2/ImageFormatConstraints`] in a
11111/// single [`fuchsia.sysmem2/BufferCollectionConstraints`] when feasible (when a
11112/// participant just needs to express the ability to work with more than a
11113/// single [`fuchsia.images2/PixelFormat`], with sysmem choosing which
11114/// `PixelFormat` to use among those supported by all participants).
11115///
11116/// Similar to [`fuchsia.sysmem2/BufferCollectionToken`] and
11117/// [`fuchsia.sysmem2/BufferCollection`], closure of the
11118/// `BufferCollectionTokenGroup` channel without sending
11119/// [`fuchsia.sysmem2/Node.Release`] first will cause buffer collection failure
11120/// (or subtree failure if using
11121/// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11122/// [`fuchsia.sysmem2/BufferCollection.AttachToken`] and the
11123/// `BufferCollectionTokenGroup` is part of a subtree under such a node that
11124/// doesn't propagate failure to its parent).
11125///
11126/// Epitaphs are not used in this protocol.
11127#[derive(Debug)]
11128pub enum BufferCollectionTokenGroupRequest {
11129    /// Ensure that previous messages have been received server side. This is
11130    /// particularly useful after previous messages that created new tokens,
11131    /// because a token must be known to the sysmem server before sending the
11132    /// token to another participant.
11133    ///
11134    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
11135    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
11136    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
11137    /// to mitigate the possibility of a hostile/fake
11138    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
11139    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], which also validates
11141    /// the token as part of exchanging it for a
11142    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
11143    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
11144    /// of stalling.
11145    ///
11146    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
11147    /// and then starting and completing a `Sync`, it's then safe to send the
11148    /// `BufferCollectionToken` client ends to other participants knowing the
11149    /// server will recognize the tokens when they're sent by the other
11150    /// participants to sysmem in a
11151    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
11152    /// efficient way to create tokens while avoiding unnecessary round trips.
11153    ///
11154    /// Other options include waiting for each
11155    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
11156    /// individually (using separate call to `Sync` after each), or calling
11157    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
11158    /// converted to a `BufferCollection` via
11159    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
11160    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
11161    /// the sync step and can create multiple tokens at once.
11162    Sync { responder: BufferCollectionTokenGroupSyncResponder },
11163    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
11164    ///
11165    /// Normally a participant will convert a `BufferCollectionToken` into a
11166    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
11167    /// `Release` via the token (and then close the channel immediately or
11168    /// shortly later in response to server closing the server end), which
11169    /// avoids causing buffer collection failure. Without a prior `Release`,
11170    /// closing the `BufferCollectionToken` client end will cause buffer
11171    /// collection failure.
11172    ///
11173    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
11174    ///
11175    /// By default the server handles unexpected closure of a
11176    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
11177    /// first) by failing the buffer collection. Partly this is to expedite
11178    /// closing VMO handles to reclaim memory when any participant fails. If a
11179    /// participant would like to cleanly close a `BufferCollection` without
11180    /// causing buffer collection failure, the participant can send `Release`
11181    /// before closing the `BufferCollection` client end. The `Release` can
11182    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
11183    /// buffer collection won't require constraints from this node in order to
11184    /// allocate. If after `SetConstraints`, the constraints are retained and
11185    /// aggregated, despite the lack of `BufferCollection` connection at the
11186    /// time of constraints aggregation.
11187    ///
11188    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
11189    ///
11190    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
11191    /// end (without `Release` first) will trigger failure of the buffer
11192    /// collection. To close a `BufferCollectionTokenGroup` channel without
11193    /// failing the buffer collection, ensure that AllChildrenPresent() has been
11194    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
11195    /// client end.
11196    ///
11197    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
11199    /// buffer collection will fail (triggered by reception of `Release` without
11200    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
11201    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
11202    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
11203    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
11204    /// close requires `AllChildrenPresent` (if not already sent), then
11205    /// `Release`, then close client end.
11206    ///
11207    /// If `Release` occurs after `AllChildrenPresent`, the children and all
11208    /// their constraints remain intact (just as they would if the
11209    /// `BufferCollectionTokenGroup` channel had remained open), and the client
11210    /// end close doesn't trigger buffer collection failure.
11211    ///
11212    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
11213    ///
11214    /// For brevity, the per-channel-protocol paragraphs above ignore the
11215    /// separate failure domain created by
11216    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
11217    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
11218    /// unexpectedly closes (without `Release` first) and that client end is
11219    /// under a failure domain, instead of failing the whole buffer collection,
11220    /// the failure domain is failed, but the buffer collection itself is
11221    /// isolated from failure of the failure domain. Such failure domains can be
11222    /// nested, in which case only the inner-most failure domain in which the
11223    /// `Node` resides fails.
11224    Release { control_handle: BufferCollectionTokenGroupControlHandle },
11225    /// Set a name for VMOs in this buffer collection.
11226    ///
11227    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
11228    /// will be truncated to fit. The name of the vmo will be suffixed with the
11229    /// buffer index within the collection (if the suffix fits within
11230    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
11231    /// listed in the inspect data.
11232    ///
11233    /// The name only affects VMOs allocated after the name is set; this call
11234    /// does not rename existing VMOs. If multiple clients set different names
11235    /// then the larger priority value will win. Setting a new name with the
11236    /// same priority as a prior name doesn't change the name.
11237    ///
11238    /// All table fields are currently required.
11239    ///
11240    /// + request `priority` The name is only set if this is the first `SetName`
11241    ///   or if `priority` is greater than any previous `priority` value in
11242    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
11243    /// + request `name` The name for VMOs created under this buffer collection.
11244    SetName { payload: NodeSetNameRequest, control_handle: BufferCollectionTokenGroupControlHandle },
11245    /// Set information about the current client that can be used by sysmem to
11246    /// help diagnose leaking memory and allocation stalls waiting for a
11247    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
11248    ///
11249    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
11251    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
11252    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
11253    ///
11254    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
11255    /// `Allocator` is the most efficient way to ensure that all
11256    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
11257    /// set, and is also more efficient than separately sending the same debug
11258    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
11259    /// created [`fuchsia.sysmem2/Node`].
11260    ///
11261    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
11262    /// indicate which client is closing their channel first, leading to subtree
11263    /// failure (which can be normal if the purpose of the subtree is over, but
11264    /// if happening earlier than expected, the client-channel-specific name can
11265    /// help diagnose where the failure is first coming from, from sysmem's
11266    /// point of view).
11267    ///
11268    /// All table fields are currently required.
11269    ///
11270    /// + request `name` This can be an arbitrary string, but the current
11271    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
11272    /// + request `id` This can be an arbitrary id, but the current process ID
11273    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
11274    SetDebugClientInfo {
11275        payload: NodeSetDebugClientInfoRequest,
11276        control_handle: BufferCollectionTokenGroupControlHandle,
11277    },
11278    /// Sysmem logs a warning if sysmem hasn't seen
11279    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
11280    /// within 5 seconds after creation of a new collection.
11281    ///
11282    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
11284    /// take effect.
11285    ///
11286    /// In most cases the default works well.
11287    ///
11288    /// All table fields are currently required.
11289    ///
11290    /// + request `deadline` The time at which sysmem will start trying to log
11291    ///   the warning, unless all constraints are with sysmem by then.
11292    SetDebugTimeoutLogDeadline {
11293        payload: NodeSetDebugTimeoutLogDeadlineRequest,
11294        control_handle: BufferCollectionTokenGroupControlHandle,
11295    },
11296    /// This enables verbose logging for the buffer collection.
11297    ///
11298    /// Verbose logging includes constraints set via
11299    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
11300    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
11301    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
11302    /// the tree of `Node`(s).
11303    ///
11304    /// Normally sysmem prints only a single line complaint when aggregation
11305    /// fails, with just the specific detailed reason that aggregation failed,
11306    /// with little surrounding context.  While this is often enough to diagnose
11307    /// a problem if only a small change was made and everything was working
11308    /// before the small change, it's often not particularly helpful for getting
11309    /// a new buffer collection to work for the first time.  Especially with
11310    /// more complex trees of nodes, involving things like
11311    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
11312    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
11313    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
11314    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
11315    /// looks like and why it's failing a logical allocation, or why a tree or
11316    /// subtree is failing sooner than expected.
11317    ///
11318    /// The intent of the extra logging is to be acceptable from a performance
11319    /// point of view, under the assumption that verbose logging is only enabled
11320    /// on a low number of buffer collections. If we're not tracking down a bug,
11321    /// we shouldn't send this message.
11322    SetVerboseLogging { control_handle: BufferCollectionTokenGroupControlHandle },
11323    /// This gets a handle that can be used as a parameter to
11324    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
11325    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
11326    /// client obtained this handle from this `Node`.
11327    ///
11328    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
11329    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
11330    /// despite the two calls typically being on different channels.
11331    ///
11332    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
11333    ///
11334    /// All table fields are currently required.
11335    ///
11336    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
11337    ///   different `Node` channel, to prove that the client obtained the handle
11338    ///   from this `Node`.
11339    GetNodeRef { responder: BufferCollectionTokenGroupGetNodeRefResponder },
11340    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
11341    /// rooted at a different child token of a common parent
11342    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
11343    /// passed-in `node_ref`.
11344    ///
11345    /// This call is for assisting with admission control de-duplication, and
11346    /// with debugging.
11347    ///
11348    /// The `node_ref` must be obtained using
11349    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
11350    ///
11351    /// The `node_ref` can be a duplicated handle; it's not necessary to call
11352    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
11353    ///
11354    /// If a calling token may not actually be a valid token at all due to a
11355    /// potentially hostile/untrusted provider of the token, call
11356    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
11357    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
11358    /// never responds due to a calling token not being a real token (not really
11359    /// talking to sysmem).  Another option is to call
11360    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
11361    /// which also validates the token along with converting it to a
11362    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
11363    ///
11364    /// All table fields are currently required.
11365    ///
11366    /// - response `is_alternate`
11367    ///   - true: The first parent node in common between the calling node and
11368    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
11369    ///     that the calling `Node` and the `node_ref` `Node` will not have both
11370    ///     their constraints apply - rather sysmem will choose one or the other
11371    ///     of the constraints - never both.  This is because only one child of
11372    ///     a `BufferCollectionTokenGroup` is selected during logical
11373    ///     allocation, with only that one child's subtree contributing to
11374    ///     constraints aggregation.
11375    ///   - false: The first parent node in common between the calling `Node`
11376    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
11377    ///     Currently, this means the first parent node in common is a
11378    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
11379    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
11380    ///     `Node` may have both their constraints apply during constraints
11381    ///     aggregation of the logical allocation, if both `Node`(s) are
11382    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
11383    ///     this case, there is no `BufferCollectionTokenGroup` that will
11384    ///     directly prevent the two `Node`(s) from both being selected and
11385    ///     their constraints both aggregated, but even when false, one or both
11386    ///     `Node`(s) may still be eliminated from consideration if one or both
11387    ///     `Node`(s) has a direct or indirect parent
11388    ///     `BufferCollectionTokenGroup` which selects a child subtree other
11389    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
11390    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
11391    ///   associated with the same buffer collection as the calling `Node`.
11392    ///   Another reason for this error is if the `node_ref` is an
11393    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
11394    ///   a real `node_ref` obtained from `GetNodeRef`.
11395    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
11397    ///   the needed rights expected on a real `node_ref`.
11398    /// * No other failing status codes are returned by this call.  However,
11399    ///   sysmem may add additional codes in future, so the client should have
11400    ///   sensible default handling for any failing status code.
11401    IsAlternateFor {
11402        payload: NodeIsAlternateForRequest,
11403        responder: BufferCollectionTokenGroupIsAlternateForResponder,
11404    },
11405    /// Get the buffer collection ID. This ID is also available from
11406    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
11407    /// within the collection).
11408    ///
11409    /// This call is mainly useful in situations where we can't convey a
11410    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
11411    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
11412    /// handle, which can be joined back up with a `BufferCollection` client end
11413    /// that was created via a different path. Prefer to convey a
11414    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
11415    ///
11416    /// Trusting a `buffer_collection_id` value from a source other than sysmem
11417    /// is analogous to trusting a koid value from a source other than zircon.
11418    /// Both should be avoided unless really necessary, and both require
11419    /// caution. In some situations it may be reasonable to refer to a
11420    /// pre-established `BufferCollection` by `buffer_collection_id` via a
11421    /// protocol for efficiency reasons, but an incoming value purporting to be
11422    /// a `buffer_collection_id` is not sufficient alone to justify granting the
11423    /// sender of the `buffer_collection_id` any capability. The sender must
11424    /// first prove to a receiver that the sender has/had a VMO or has/had a
11425    /// `BufferCollectionToken` to the same collection by sending a handle that
11426    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
11427    /// `buffer_collection_id` value. The receiver should take care to avoid
11428    /// assuming that a sender had a `BufferCollectionToken` in cases where the
11429    /// sender has only proven that the sender had a VMO.
11430    ///
11431    /// - response `buffer_collection_id` This ID is unique per buffer
11432    ///   collection per boot. Each buffer is uniquely identified by the
11433    ///   `buffer_collection_id` and `buffer_index` together.
11434    GetBufferCollectionId { responder: BufferCollectionTokenGroupGetBufferCollectionIdResponder },
11435    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
11436    /// created after this message to weak, which means that a client's `Node`
11437    /// client end (or a child created after this message) is not alone
11438    /// sufficient to keep allocated VMOs alive.
11439    ///
11440    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
11441    /// `close_weak_asap`.
11442    ///
11443    /// This message is only permitted before the `Node` becomes ready for
11444    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
11445    ///   * `BufferCollectionToken`: any time
11446    ///   * `BufferCollection`: before `SetConstraints`
11447    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
11448    ///
11449    /// Currently, no conversion from strong `Node` to weak `Node` after ready
11450    /// for allocation is provided, but a client can simulate that by creating
11451    /// an additional `Node` before allocation and setting that additional
11452    /// `Node` to weak, and then potentially at some point later sending
11453    /// `Release` and closing the client end of the client's strong `Node`, but
11454    /// keeping the client's weak `Node`.
11455    ///
11456    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
11457    /// collection failure (all `Node` client end(s) will see
11458    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
11459    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
11460    /// this situation until all `Node`(s) are ready for allocation. For initial
11461    /// allocation to succeed, at least one strong `Node` is required to exist
11462    /// at allocation time, but after that client receives VMO handles, that
11463    /// client can `BufferCollection.Release` and close the client end without
11464    /// causing this type of failure.
11465    ///
11466    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
11467    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
11468    /// separately as appropriate.
11469    SetWeak { control_handle: BufferCollectionTokenGroupControlHandle },
11470    /// This indicates to sysmem that the client is prepared to pay attention to
11471    /// `close_weak_asap`.
11472    ///
11473    /// If sent, this message must be before
11474    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
11475    ///
11476    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
11477    /// send this message before `WaitForAllBuffersAllocated`, or a parent
11478    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
11479    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
11480    /// trigger buffer collection failure.
11481    ///
11482    /// This message is necessary because weak sysmem VMOs have not always been
11483    /// a thing, so older clients are not aware of the need to pay attention to
11484    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
11485    /// sysmem weak VMO handles asap. By having this message and requiring
11486    /// participants to indicate their acceptance of this aspect of the overall
11487    /// protocol, we avoid situations where an older client is delivered a weak
11488    /// VMO without any way for sysmem to get that VMO to close quickly later
11489    /// (and on a per-buffer basis).
11490    ///
11491    /// A participant that doesn't handle `close_weak_asap` and also doesn't
11492    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
11493    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
11494    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
11495    /// same participant has a child/delegate which does retrieve VMOs, that
11496    /// child/delegate will need to send `SetWeakOk` before
11497    /// `WaitForAllBuffersAllocated`.
11498    ///
11499    /// + request `for_child_nodes_also` If present and true, this means direct
11500    ///   child nodes of this node created after this message plus all
11501    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
11502    ///   those nodes. Any child node of this node that was created before this
11503    ///   message is not included. This setting is "sticky" in the sense that a
11504    ///   subsequent `SetWeakOk` without this bool set to true does not reset
11505    ///   the server-side bool. If this creates a problem for a participant, a
11506    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
11507    ///   tokens instead, as appropriate. A participant should only set
11508    ///   `for_child_nodes_also` true if the participant can really promise to
11509    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
11510    ///   weak VMO handles held by participants holding the corresponding child
11511    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
11512    ///   which are using sysmem(1) can be weak, despite the clients of those
11513    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
11514    ///   direct way to find out about `close_weak_asap`. This only applies to
11515    ///   descendents of this `Node` which are using sysmem(1), not to this
11516    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
11517    ///   token, which will fail allocation unless an ancestor of this `Node`
11518    ///   specified `for_child_nodes_also` true.
11519    SetWeakOk {
11520        payload: NodeSetWeakOkRequest,
11521        control_handle: BufferCollectionTokenGroupControlHandle,
11522    },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
11525    /// reservation by a different `Node` via
11526    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
11527    ///
11528    /// The `Node` buffer counts may not be released until the entire tree of
11529    /// `Node`(s) is closed or failed, because
11530    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
11531    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
11532    /// `Node` buffer counts remain reserved until the orphaned node is later
11533    /// cleaned up.
11534    ///
11535    /// If the `Node` exceeds a fairly large number of attached eventpair server
11536    /// ends, a log message will indicate this and the `Node` (and the
11537    /// appropriate) sub-tree will fail.
11538    ///
11539    /// The `server_end` will remain open when
11540    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
11541    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
11542    /// [`fuchsia.sysmem2/BufferCollection`].
11543    ///
11544    /// This message can also be used with a
11545    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
11546    AttachNodeTracking {
11547        payload: NodeAttachNodeTrackingRequest,
11548        control_handle: BufferCollectionTokenGroupControlHandle,
11549    },
11550    /// Create a child [`fuchsia.sysmem2/BufferCollectionToken`]. Only one child
11551    /// (including its children) will be selected during allocation (or logical
11552    /// allocation).
11553    ///
11554    /// Before passing the client end of this token to
11555    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], completion of
11556    /// [`fuchsia.sysmem2/Node.Sync`] after
11557    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] is required.
11558    /// Or the client can use
11559    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`] which
11560    /// essentially includes the `Sync`.
11561    ///
11562    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11563    /// fail the group's subtree and close the connection.
11564    ///
11565    /// After all children have been created, send AllChildrenPresent.
11566    ///
11567    /// + request `token_request` The server end of the new token channel.
11568    /// + request `rights_attenuation_mask` If ZX_RIGHT_SAME_RIGHTS, the created
11569    ///   token allows the holder to get the same rights to buffers as the
11570    ///   parent token (of the group) had. When the value isn't
    ///   ZX_RIGHT_SAME_RIGHTS, the value is interpreted as a bitmask with 0
    ///   bits ensuring those rights are attenuated, so 0xFFFFFFFF is a synonym
11573    ///   for ZX_RIGHT_SAME_RIGHTS. The value 0 is not allowed and intentionally
11574    ///   causes subtree failure.
11575    CreateChild {
11576        payload: BufferCollectionTokenGroupCreateChildRequest,
11577        control_handle: BufferCollectionTokenGroupControlHandle,
11578    },
11579    /// Create 1 or more child tokens at once, synchronously.  In contrast to
11580    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`], no
11581    /// [`fuchsia.sysmem2/Node.Sync`] is required before passing the client end
11582    /// of a returned token to
11583    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`].
11584    ///
11585    /// The lower-index child tokens are higher priority (attempted sooner) than
11586    /// higher-index child tokens.
11587    ///
11588    /// As per all child tokens, successful aggregation will choose exactly one
11589    /// child among all created children (across all children created across
11590    /// potentially multiple calls to
11591    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChild`] and
11592    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.CreateChildrenSync`]).
11593    ///
11594    /// The maximum permissible total number of children per group, and total
11595    /// number of nodes in an overall tree (from the root) are capped to limits
11596    /// which are not configurable via these protocols.
11597    ///
11598    /// Sending CreateChildrenSync after AllChildrenPresent is not permitted;
11599    /// this will fail the group's subtree and close the connection.
11600    ///
11601    /// After all children have been created, send AllChildrenPresent.
11602    ///
11603    /// + request `rights_attentuation_masks` The size of the
11604    ///   `rights_attentuation_masks` determines the number of created child
11605    ///   tokens. The value ZX_RIGHT_SAME_RIGHTS doesn't attenuate any rights.
11606    ///   The value 0xFFFFFFFF is a synonym for ZX_RIGHT_SAME_RIGHTS. For any
11607    ///   other value, each 0 bit in the mask attenuates that right.
11608    /// - response `tokens` The created child tokens.
11609    CreateChildrenSync {
11610        payload: BufferCollectionTokenGroupCreateChildrenSyncRequest,
11611        responder: BufferCollectionTokenGroupCreateChildrenSyncResponder,
11612    },
11613    /// Indicate that no more children will be created.
11614    ///
11615    /// After creating all children, the client should send
11616    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`] to
11617    /// inform sysmem that no more children will be created, so that sysmem can
11618    /// know when it's ok to start aggregating constraints.
11619    ///
11620    /// Sending CreateChild after AllChildrenPresent is not permitted; this will
11621    /// fail the group's subtree and close the connection.
11622    ///
11623    /// If [`fuchsia.sysmem2/Node.Release`] is to be sent, it should be sent
11624    /// after `AllChildrenPresent`, else failure of the group's subtree will be
11625    /// triggered. This is intentionally not analogous to how `Release` without
11626    /// prior [`fuchsia.sysmem2/BufferCollection.SetConstraints`] doesn't cause
11627    /// subtree failure.
11628    AllChildrenPresent { control_handle: BufferCollectionTokenGroupControlHandle },
11629    /// An interaction was received which does not match any known method.
11630    #[non_exhaustive]
11631    _UnknownMethod {
11632        /// Ordinal of the method that was called.
11633        ordinal: u64,
11634        control_handle: BufferCollectionTokenGroupControlHandle,
11635        method_type: fidl::MethodType,
11636    },
11637}
11638
11639impl BufferCollectionTokenGroupRequest {
11640    #[allow(irrefutable_let_patterns)]
11641    pub fn into_sync(self) -> Option<(BufferCollectionTokenGroupSyncResponder)> {
11642        if let BufferCollectionTokenGroupRequest::Sync { responder } = self {
11643            Some((responder))
11644        } else {
11645            None
11646        }
11647    }
11648
11649    #[allow(irrefutable_let_patterns)]
11650    pub fn into_release(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11651        if let BufferCollectionTokenGroupRequest::Release { control_handle } = self {
11652            Some((control_handle))
11653        } else {
11654            None
11655        }
11656    }
11657
11658    #[allow(irrefutable_let_patterns)]
11659    pub fn into_set_name(
11660        self,
11661    ) -> Option<(NodeSetNameRequest, BufferCollectionTokenGroupControlHandle)> {
11662        if let BufferCollectionTokenGroupRequest::SetName { payload, control_handle } = self {
11663            Some((payload, control_handle))
11664        } else {
11665            None
11666        }
11667    }
11668
11669    #[allow(irrefutable_let_patterns)]
11670    pub fn into_set_debug_client_info(
11671        self,
11672    ) -> Option<(NodeSetDebugClientInfoRequest, BufferCollectionTokenGroupControlHandle)> {
11673        if let BufferCollectionTokenGroupRequest::SetDebugClientInfo { payload, control_handle } =
11674            self
11675        {
11676            Some((payload, control_handle))
11677        } else {
11678            None
11679        }
11680    }
11681
11682    #[allow(irrefutable_let_patterns)]
11683    pub fn into_set_debug_timeout_log_deadline(
11684        self,
11685    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, BufferCollectionTokenGroupControlHandle)>
11686    {
11687        if let BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline {
11688            payload,
11689            control_handle,
11690        } = self
11691        {
11692            Some((payload, control_handle))
11693        } else {
11694            None
11695        }
11696    }
11697
11698    #[allow(irrefutable_let_patterns)]
11699    pub fn into_set_verbose_logging(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11700        if let BufferCollectionTokenGroupRequest::SetVerboseLogging { control_handle } = self {
11701            Some((control_handle))
11702        } else {
11703            None
11704        }
11705    }
11706
11707    #[allow(irrefutable_let_patterns)]
11708    pub fn into_get_node_ref(self) -> Option<(BufferCollectionTokenGroupGetNodeRefResponder)> {
11709        if let BufferCollectionTokenGroupRequest::GetNodeRef { responder } = self {
11710            Some((responder))
11711        } else {
11712            None
11713        }
11714    }
11715
11716    #[allow(irrefutable_let_patterns)]
11717    pub fn into_is_alternate_for(
11718        self,
11719    ) -> Option<(NodeIsAlternateForRequest, BufferCollectionTokenGroupIsAlternateForResponder)>
11720    {
11721        if let BufferCollectionTokenGroupRequest::IsAlternateFor { payload, responder } = self {
11722            Some((payload, responder))
11723        } else {
11724            None
11725        }
11726    }
11727
11728    #[allow(irrefutable_let_patterns)]
11729    pub fn into_get_buffer_collection_id(
11730        self,
11731    ) -> Option<(BufferCollectionTokenGroupGetBufferCollectionIdResponder)> {
11732        if let BufferCollectionTokenGroupRequest::GetBufferCollectionId { responder } = self {
11733            Some((responder))
11734        } else {
11735            None
11736        }
11737    }
11738
11739    #[allow(irrefutable_let_patterns)]
11740    pub fn into_set_weak(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11741        if let BufferCollectionTokenGroupRequest::SetWeak { control_handle } = self {
11742            Some((control_handle))
11743        } else {
11744            None
11745        }
11746    }
11747
11748    #[allow(irrefutable_let_patterns)]
11749    pub fn into_set_weak_ok(
11750        self,
11751    ) -> Option<(NodeSetWeakOkRequest, BufferCollectionTokenGroupControlHandle)> {
11752        if let BufferCollectionTokenGroupRequest::SetWeakOk { payload, control_handle } = self {
11753            Some((payload, control_handle))
11754        } else {
11755            None
11756        }
11757    }
11758
11759    #[allow(irrefutable_let_patterns)]
11760    pub fn into_attach_node_tracking(
11761        self,
11762    ) -> Option<(NodeAttachNodeTrackingRequest, BufferCollectionTokenGroupControlHandle)> {
11763        if let BufferCollectionTokenGroupRequest::AttachNodeTracking { payload, control_handle } =
11764            self
11765        {
11766            Some((payload, control_handle))
11767        } else {
11768            None
11769        }
11770    }
11771
11772    #[allow(irrefutable_let_patterns)]
11773    pub fn into_create_child(
11774        self,
11775    ) -> Option<(
11776        BufferCollectionTokenGroupCreateChildRequest,
11777        BufferCollectionTokenGroupControlHandle,
11778    )> {
11779        if let BufferCollectionTokenGroupRequest::CreateChild { payload, control_handle } = self {
11780            Some((payload, control_handle))
11781        } else {
11782            None
11783        }
11784    }
11785
11786    #[allow(irrefutable_let_patterns)]
11787    pub fn into_create_children_sync(
11788        self,
11789    ) -> Option<(
11790        BufferCollectionTokenGroupCreateChildrenSyncRequest,
11791        BufferCollectionTokenGroupCreateChildrenSyncResponder,
11792    )> {
11793        if let BufferCollectionTokenGroupRequest::CreateChildrenSync { payload, responder } = self {
11794            Some((payload, responder))
11795        } else {
11796            None
11797        }
11798    }
11799
11800    #[allow(irrefutable_let_patterns)]
11801    pub fn into_all_children_present(self) -> Option<(BufferCollectionTokenGroupControlHandle)> {
11802        if let BufferCollectionTokenGroupRequest::AllChildrenPresent { control_handle } = self {
11803            Some((control_handle))
11804        } else {
11805            None
11806        }
11807    }
11808
11809    /// Name of the method defined in FIDL
11810    pub fn method_name(&self) -> &'static str {
11811        match *self {
11812            BufferCollectionTokenGroupRequest::Sync { .. } => "sync",
11813            BufferCollectionTokenGroupRequest::Release { .. } => "release",
11814            BufferCollectionTokenGroupRequest::SetName { .. } => "set_name",
11815            BufferCollectionTokenGroupRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
11816            BufferCollectionTokenGroupRequest::SetDebugTimeoutLogDeadline { .. } => {
11817                "set_debug_timeout_log_deadline"
11818            }
11819            BufferCollectionTokenGroupRequest::SetVerboseLogging { .. } => "set_verbose_logging",
11820            BufferCollectionTokenGroupRequest::GetNodeRef { .. } => "get_node_ref",
11821            BufferCollectionTokenGroupRequest::IsAlternateFor { .. } => "is_alternate_for",
11822            BufferCollectionTokenGroupRequest::GetBufferCollectionId { .. } => {
11823                "get_buffer_collection_id"
11824            }
11825            BufferCollectionTokenGroupRequest::SetWeak { .. } => "set_weak",
11826            BufferCollectionTokenGroupRequest::SetWeakOk { .. } => "set_weak_ok",
11827            BufferCollectionTokenGroupRequest::AttachNodeTracking { .. } => "attach_node_tracking",
11828            BufferCollectionTokenGroupRequest::CreateChild { .. } => "create_child",
11829            BufferCollectionTokenGroupRequest::CreateChildrenSync { .. } => "create_children_sync",
11830            BufferCollectionTokenGroupRequest::AllChildrenPresent { .. } => "all_children_present",
11831            BufferCollectionTokenGroupRequest::_UnknownMethod {
11832                method_type: fidl::MethodType::OneWay,
11833                ..
11834            } => "unknown one-way method",
11835            BufferCollectionTokenGroupRequest::_UnknownMethod {
11836                method_type: fidl::MethodType::TwoWay,
11837                ..
11838            } => "unknown two-way method",
11839        }
11840    }
11841}
11842
/// Server-side control handle for a `BufferCollectionTokenGroup` connection.
///
/// Cloning is cheap: all clones share the same underlying `ServeInner`
/// (the served channel and its state) via the `Arc`.
#[derive(Debug, Clone)]
pub struct BufferCollectionTokenGroupControlHandle {
    // Shared server-side channel state; also referenced by responders.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
11847
11848impl fidl::endpoints::ControlHandle for BufferCollectionTokenGroupControlHandle {
11849    fn shutdown(&self) {
11850        self.inner.shutdown()
11851    }
11852    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
11853        self.inner.shutdown_with_epitaph(status)
11854    }
11855
11856    fn is_closed(&self) -> bool {
11857        self.inner.channel().is_closed()
11858    }
11859    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
11860        self.inner.channel().on_closed()
11861    }
11862
11863    #[cfg(target_os = "fuchsia")]
11864    fn signal_peer(
11865        &self,
11866        clear_mask: zx::Signals,
11867        set_mask: zx::Signals,
11868    ) -> Result<(), zx_status::Status> {
11869        use fidl::Peered;
11870        self.inner.channel().signal_peer(clear_mask, set_mask)
11871    }
11872}
11873
11874impl BufferCollectionTokenGroupControlHandle {}
11875
/// Responder used to complete the two-way `BufferCollectionTokenGroup.Sync`
/// transaction. Dropping it without responding shuts down the channel (see the
/// `Drop` impl); use `drop_without_shutdown` to opt out of that behavior.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupSyncResponder {
    // Wrapped in ManuallyDrop so Drop/drop_without_shutdown can control
    // exactly when the handle is destroyed.
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
11882
/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupSyncResponder {
    fn drop(&mut self) {
        // Shut down first so the client observes channel closure rather than
        // waiting forever for a response that will never come.
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
11893
impl fidl::endpoints::Responder for BufferCollectionTokenGroupSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    // Consume the responder without triggering the shutdown-on-drop behavior
    // implemented in `Drop`.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
11908
11909impl BufferCollectionTokenGroupSyncResponder {
11910    /// Sends a response to the FIDL transaction.
11911    ///
11912    /// Sets the channel to shutdown if an error occurs.
11913    pub fn send(self) -> Result<(), fidl::Error> {
11914        let _result = self.send_raw();
11915        if _result.is_err() {
11916            self.control_handle.shutdown();
11917        }
11918        self.drop_without_shutdown();
11919        _result
11920    }
11921
11922    /// Similar to "send" but does not shutdown the channel if an error occurs.
11923    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
11924        let _result = self.send_raw();
11925        self.drop_without_shutdown();
11926        _result
11927    }
11928
11929    fn send_raw(&self) -> Result<(), fidl::Error> {
11930        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
11931            fidl::encoding::Flexible::new(()),
11932            self.tx_id,
11933            0x11ac2555cf575b54,
11934            fidl::encoding::DynamicFlags::FLEXIBLE,
11935        )
11936    }
11937}
11938
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetNodeRefResponder {
    /// Wrapped in `ManuallyDrop` so `drop`/`drop_without_shutdown` below can
    /// control exactly when the handle is dropped (with or without shutdown).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    /// Transaction id of the incoming request; echoed in the response message
    /// so the client can correlate it.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetNodeRefResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl BufferCollectionTokenGroupGetNodeRefResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: NodeGetNodeRefResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the `GetNodeRef` response (the payload is taken by
    /// value because encoding consumes its handle) using the stored
    /// transaction id and the method ordinal constant below.
    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
            fidl::encoding::Flexible::new(&mut payload),
            self.tx_id,
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12004
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupIsAlternateForResponder {
    /// Wrapped in `ManuallyDrop` so `drop`/`drop_without_shutdown` below can
    /// control exactly when the handle is dropped (with or without shutdown).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    /// Transaction id of the incoming request; echoed in the response message
    /// so the client can correlate it.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGroupIsAlternateForResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl BufferCollectionTokenGroupIsAlternateForResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the `IsAlternateFor` response — a flexible result
    /// union carrying either the response table or a sysmem `Error` — using
    /// the stored transaction id and the method ordinal constant below.
    fn send_raw(
        &self,
        mut result: Result<&NodeIsAlternateForResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            NodeIsAlternateForResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12079
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    /// Wrapped in `ManuallyDrop` so `drop`/`drop_without_shutdown` below can
    /// control exactly when the handle is dropped (with or without shutdown).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    /// Transaction id of the incoming request; echoed in the response message
    /// so the client can correlate it.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl BufferCollectionTokenGroupGetBufferCollectionIdResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: &NodeGetBufferCollectionIdResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the `GetBufferCollectionId` response (payload taken
    /// by reference — the table carries no handles) using the stored
    /// transaction id and the method ordinal constant below.
    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
        self.control_handle
            .inner
            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
                fidl::encoding::Flexible::new(payload),
                self.tx_id,
                0x77d19a494b78ba8c,
                fidl::encoding::DynamicFlags::FLEXIBLE,
            )
    }
}
12147
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct BufferCollectionTokenGroupCreateChildrenSyncResponder {
    /// Wrapped in `ManuallyDrop` so `drop`/`drop_without_shutdown` below can
    /// control exactly when the handle is dropped (with or without shutdown).
    control_handle: std::mem::ManuallyDrop<BufferCollectionTokenGroupControlHandle>,
    /// Transaction id of the incoming request; echoed in the response message
    /// so the client can correlate it.
    tx_id: u32,
}

/// Set the channel to be shutdown (see [`BufferCollectionTokenGroupControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for BufferCollectionTokenGroupCreateChildrenSyncResponder {
    type ControlHandle = BufferCollectionTokenGroupControlHandle;

    fn control_handle(&self) -> &BufferCollectionTokenGroupControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl BufferCollectionTokenGroupCreateChildrenSyncResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the `CreateChildrenSync` response (the payload is
    /// taken by value because encoding consumes its handles) using the stored
    /// transaction id and the method ordinal constant below.
    fn send_raw(
        &self,
        mut payload: BufferCollectionTokenGroupCreateChildrenSyncResponse,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleType<
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
        >>(
            fidl::encoding::Flexible::new(&mut payload),
            self.tx_id,
            0x15dea448c536070a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
12221
/// Protocol marker for `fuchsia.sysmem2/Node`, tying together the async
/// proxy, request stream, and (on Fuchsia targets) synchronous proxy types.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct NodeMarker;

impl fidl::endpoints::ProtocolMarker for NodeMarker {
    type Proxy = NodeProxy;
    type RequestStream = NodeRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = NodeSynchronousProxy;

    // Name used in debug and error output for this protocol.
    const DEBUG_NAME: &'static str = "(anonymous) Node";
}
/// Result of [`Node.IsAlternateFor`]: the response table on success, or a
/// sysmem [`Error`] on failure.
pub type NodeIsAlternateForResult = Result<NodeIsAlternateForResponse, Error>;
12234
/// Client-side interface for the `Node` protocol, implemented by the
/// generated proxy types. Two-way methods return an associated future type
/// resolving to the decoded response; one-way methods return
/// `Result<(), fidl::Error>` from the immediate send.
pub trait NodeProxyInterface: Send + Sync {
    /// Future resolving when the server acknowledges a `Sync` request.
    type SyncResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#sync(&self) -> Self::SyncResponseFut;
    fn r#release(&self) -> Result<(), fidl::Error>;
    fn r#set_name(&self, payload: &NodeSetNameRequest) -> Result<(), fidl::Error>;
    fn r#set_debug_client_info(
        &self,
        payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_debug_timeout_log_deadline(
        &self,
        payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error>;
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error>;
    /// Future resolving to the `GetNodeRef` response.
    type GetNodeRefResponseFut: std::future::Future<Output = Result<NodeGetNodeRefResponse, fidl::Error>>
        + Send;
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut;
    /// Future resolving to the `IsAlternateFor` result (response or sysmem error).
    type IsAlternateForResponseFut: std::future::Future<Output = Result<NodeIsAlternateForResult, fidl::Error>>
        + Send;
    fn r#is_alternate_for(
        &self,
        payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut;
    /// Future resolving to the `GetBufferCollectionId` response.
    type GetBufferCollectionIdResponseFut: std::future::Future<Output = Result<NodeGetBufferCollectionIdResponse, fidl::Error>>
        + Send;
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut;
    fn r#set_weak(&self) -> Result<(), fidl::Error>;
    fn r#set_weak_ok(&self, payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error>;
    fn r#attach_node_tracking(
        &self,
        payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client for the `Node` protocol. Only compiled on
/// Fuchsia targets; wraps a raw channel in a blocking FIDL client.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct NodeSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for NodeSynchronousProxy {
    type Proxy = NodeProxy;
    type Protocol = NodeMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
12291
12292#[cfg(target_os = "fuchsia")]
12293impl NodeSynchronousProxy {
12294    pub fn new(channel: fidl::Channel) -> Self {
12295        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12296        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
12297    }
12298
12299    pub fn into_channel(self) -> fidl::Channel {
12300        self.client.into_channel()
12301    }
12302
12303    /// Waits until an event arrives and returns it. It is safe for other
12304    /// threads to make concurrent requests while waiting for an event.
12305    pub fn wait_for_event(&self, deadline: zx::MonotonicInstant) -> Result<NodeEvent, fidl::Error> {
12306        NodeEvent::decode(self.client.wait_for_event(deadline)?)
12307    }
12308
12309    /// Ensure that previous messages have been received server side. This is
12310    /// particularly useful after previous messages that created new tokens,
12311    /// because a token must be known to the sysmem server before sending the
12312    /// token to another participant.
12313    ///
12314    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12315    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12316    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12317    /// to mitigate the possibility of a hostile/fake
12318    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12319    /// Another way is to pass the token to
12320    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12321    /// the token as part of exchanging it for a
12322    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12323    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12324    /// of stalling.
12325    ///
12326    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12327    /// and then starting and completing a `Sync`, it's then safe to send the
12328    /// `BufferCollectionToken` client ends to other participants knowing the
12329    /// server will recognize the tokens when they're sent by the other
12330    /// participants to sysmem in a
12331    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12332    /// efficient way to create tokens while avoiding unnecessary round trips.
12333    ///
12334    /// Other options include waiting for each
12335    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12336    /// individually (using separate call to `Sync` after each), or calling
12337    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12338    /// converted to a `BufferCollection` via
12339    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12340    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12341    /// the sync step and can create multiple tokens at once.
12342    pub fn r#sync(&self, ___deadline: zx::MonotonicInstant) -> Result<(), fidl::Error> {
12343        let _response = self.client.send_query::<
12344            fidl::encoding::EmptyPayload,
12345            fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
12346        >(
12347            (),
12348            0x11ac2555cf575b54,
12349            fidl::encoding::DynamicFlags::FLEXIBLE,
12350            ___deadline,
12351        )?
12352        .into_result::<NodeMarker>("sync")?;
12353        Ok(_response)
12354    }
12355
12356    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12357    ///
12358    /// Normally a participant will convert a `BufferCollectionToken` into a
12359    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12360    /// `Release` via the token (and then close the channel immediately or
12361    /// shortly later in response to server closing the server end), which
12362    /// avoids causing buffer collection failure. Without a prior `Release`,
12363    /// closing the `BufferCollectionToken` client end will cause buffer
12364    /// collection failure.
12365    ///
12366    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12367    ///
12368    /// By default the server handles unexpected closure of a
12369    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12370    /// first) by failing the buffer collection. Partly this is to expedite
12371    /// closing VMO handles to reclaim memory when any participant fails. If a
12372    /// participant would like to cleanly close a `BufferCollection` without
12373    /// causing buffer collection failure, the participant can send `Release`
12374    /// before closing the `BufferCollection` client end. The `Release` can
12375    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12376    /// buffer collection won't require constraints from this node in order to
12377    /// allocate. If after `SetConstraints`, the constraints are retained and
12378    /// aggregated, despite the lack of `BufferCollection` connection at the
12379    /// time of constraints aggregation.
12380    ///
12381    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12382    ///
12383    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12384    /// end (without `Release` first) will trigger failure of the buffer
12385    /// collection. To close a `BufferCollectionTokenGroup` channel without
12386    /// failing the buffer collection, ensure that AllChildrenPresent() has been
12387    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12388    /// client end.
12389    ///
12390    /// If `Release` occurs before
12391    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
12392    /// buffer collection will fail (triggered by reception of `Release` without
12393    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12394    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12395    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12396    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12397    /// close requires `AllChildrenPresent` (if not already sent), then
12398    /// `Release`, then close client end.
12399    ///
12400    /// If `Release` occurs after `AllChildrenPresent`, the children and all
12401    /// their constraints remain intact (just as they would if the
12402    /// `BufferCollectionTokenGroup` channel had remained open), and the client
12403    /// end close doesn't trigger buffer collection failure.
12404    ///
12405    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12406    ///
12407    /// For brevity, the per-channel-protocol paragraphs above ignore the
12408    /// separate failure domain created by
12409    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12410    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12411    /// unexpectedly closes (without `Release` first) and that client end is
12412    /// under a failure domain, instead of failing the whole buffer collection,
12413    /// the failure domain is failed, but the buffer collection itself is
12414    /// isolated from failure of the failure domain. Such failure domains can be
12415    /// nested, in which case only the inner-most failure domain in which the
12416    /// `Node` resides fails.
12417    pub fn r#release(&self) -> Result<(), fidl::Error> {
12418        self.client.send::<fidl::encoding::EmptyPayload>(
12419            (),
12420            0x6a5cae7d6d6e04c6,
12421            fidl::encoding::DynamicFlags::FLEXIBLE,
12422        )
12423    }
12424
12425    /// Set a name for VMOs in this buffer collection.
12426    ///
12427    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
12428    /// will be truncated to fit. The name of the vmo will be suffixed with the
12429    /// buffer index within the collection (if the suffix fits within
12430    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
12431    /// listed in the inspect data.
12432    ///
12433    /// The name only affects VMOs allocated after the name is set; this call
12434    /// does not rename existing VMOs. If multiple clients set different names
12435    /// then the larger priority value will win. Setting a new name with the
12436    /// same priority as a prior name doesn't change the name.
12437    ///
12438    /// All table fields are currently required.
12439    ///
12440    /// + request `priority` The name is only set if this is the first `SetName`
12441    ///   or if `priority` is greater than any previous `priority` value in
12442    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
12443    /// + request `name` The name for VMOs created under this buffer collection.
12444    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
12445        self.client.send::<NodeSetNameRequest>(
12446            payload,
12447            0xb41f1624f48c1e9,
12448            fidl::encoding::DynamicFlags::FLEXIBLE,
12449        )
12450    }
12451
12452    /// Set information about the current client that can be used by sysmem to
12453    /// help diagnose leaking memory and allocation stalls waiting for a
12454    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
12455    ///
12456    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
12457    /// `Node`(s) derived from this `Node`, unless overriden by
12458    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
12459    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
12460    ///
12461    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
12462    /// `Allocator` is the most efficient way to ensure that all
12463    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
12464    /// set, and is also more efficient than separately sending the same debug
12465    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
12466    /// created [`fuchsia.sysmem2/Node`].
12467    ///
12468    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
12469    /// indicate which client is closing their channel first, leading to subtree
12470    /// failure (which can be normal if the purpose of the subtree is over, but
12471    /// if happening earlier than expected, the client-channel-specific name can
12472    /// help diagnose where the failure is first coming from, from sysmem's
12473    /// point of view).
12474    ///
12475    /// All table fields are currently required.
12476    ///
12477    /// + request `name` This can be an arbitrary string, but the current
12478    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
12479    /// + request `id` This can be an arbitrary id, but the current process ID
12480    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
12481    pub fn r#set_debug_client_info(
12482        &self,
12483        mut payload: &NodeSetDebugClientInfoRequest,
12484    ) -> Result<(), fidl::Error> {
12485        self.client.send::<NodeSetDebugClientInfoRequest>(
12486            payload,
12487            0x5cde8914608d99b1,
12488            fidl::encoding::DynamicFlags::FLEXIBLE,
12489        )
12490    }
12491
12492    /// Sysmem logs a warning if sysmem hasn't seen
12493    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
12494    /// within 5 seconds after creation of a new collection.
12495    ///
12496    /// Clients can call this method to change when the log is printed. If
12497    /// multiple client set the deadline, it's unspecified which deadline will
12498    /// take effect.
12499    ///
12500    /// In most cases the default works well.
12501    ///
12502    /// All table fields are currently required.
12503    ///
12504    /// + request `deadline` The time at which sysmem will start trying to log
12505    ///   the warning, unless all constraints are with sysmem by then.
12506    pub fn r#set_debug_timeout_log_deadline(
12507        &self,
12508        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
12509    ) -> Result<(), fidl::Error> {
12510        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
12511            payload,
12512            0x716b0af13d5c0806,
12513            fidl::encoding::DynamicFlags::FLEXIBLE,
12514        )
12515    }
12516
12517    /// This enables verbose logging for the buffer collection.
12518    ///
12519    /// Verbose logging includes constraints set via
12520    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
12521    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
12522    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
12523    /// the tree of `Node`(s).
12524    ///
12525    /// Normally sysmem prints only a single line complaint when aggregation
12526    /// fails, with just the specific detailed reason that aggregation failed,
12527    /// with little surrounding context.  While this is often enough to diagnose
12528    /// a problem if only a small change was made and everything was working
12529    /// before the small change, it's often not particularly helpful for getting
12530    /// a new buffer collection to work for the first time.  Especially with
12531    /// more complex trees of nodes, involving things like
12532    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
12533    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
12534    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
12535    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
12536    /// looks like and why it's failing a logical allocation, or why a tree or
12537    /// subtree is failing sooner than expected.
12538    ///
12539    /// The intent of the extra logging is to be acceptable from a performance
12540    /// point of view, under the assumption that verbose logging is only enabled
12541    /// on a low number of buffer collections. If we're not tracking down a bug,
12542    /// we shouldn't send this message.
12543    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
12544        self.client.send::<fidl::encoding::EmptyPayload>(
12545            (),
12546            0x5209c77415b4dfad,
12547            fidl::encoding::DynamicFlags::FLEXIBLE,
12548        )
12549    }
12550
12551    /// This gets a handle that can be used as a parameter to
12552    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
12553    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
12554    /// client obtained this handle from this `Node`.
12555    ///
12556    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
12557    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
12558    /// despite the two calls typically being on different channels.
12559    ///
12560    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
12561    ///
12562    /// All table fields are currently required.
12563    ///
12564    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
12565    ///   different `Node` channel, to prove that the client obtained the handle
12566    ///   from this `Node`.
12567    pub fn r#get_node_ref(
12568        &self,
12569        ___deadline: zx::MonotonicInstant,
12570    ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
12571        let _response = self.client.send_query::<
12572            fidl::encoding::EmptyPayload,
12573            fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
12574        >(
12575            (),
12576            0x5b3d0e51614df053,
12577            fidl::encoding::DynamicFlags::FLEXIBLE,
12578            ___deadline,
12579        )?
12580        .into_result::<NodeMarker>("get_node_ref")?;
12581        Ok(_response)
12582    }
12583
12584    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
12585    /// rooted at a different child token of a common parent
12586    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
12587    /// passed-in `node_ref`.
12588    ///
12589    /// This call is for assisting with admission control de-duplication, and
12590    /// with debugging.
12591    ///
12592    /// The `node_ref` must be obtained using
12593    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
12594    ///
12595    /// The `node_ref` can be a duplicated handle; it's not necessary to call
12596    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
12597    ///
12598    /// If a calling token may not actually be a valid token at all due to a
12599    /// potentially hostile/untrusted provider of the token, call
12600    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
12601    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
12602    /// never responds due to a calling token not being a real token (not really
12603    /// talking to sysmem).  Another option is to call
12604    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
12605    /// which also validates the token along with converting it to a
12606    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
12607    ///
12608    /// All table fields are currently required.
12609    ///
12610    /// - response `is_alternate`
12611    ///   - true: The first parent node in common between the calling node and
12612    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
12613    ///     that the calling `Node` and the `node_ref` `Node` will not have both
12614    ///     their constraints apply - rather sysmem will choose one or the other
12615    ///     of the constraints - never both.  This is because only one child of
12616    ///     a `BufferCollectionTokenGroup` is selected during logical
12617    ///     allocation, with only that one child's subtree contributing to
12618    ///     constraints aggregation.
12619    ///   - false: The first parent node in common between the calling `Node`
12620    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
12621    ///     Currently, this means the first parent node in common is a
12622    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
12623    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
12624    ///     `Node` may have both their constraints apply during constraints
12625    ///     aggregation of the logical allocation, if both `Node`(s) are
12626    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
12627    ///     this case, there is no `BufferCollectionTokenGroup` that will
12628    ///     directly prevent the two `Node`(s) from both being selected and
12629    ///     their constraints both aggregated, but even when false, one or both
12630    ///     `Node`(s) may still be eliminated from consideration if one or both
12631    ///     `Node`(s) has a direct or indirect parent
12632    ///     `BufferCollectionTokenGroup` which selects a child subtree other
12633    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
12634    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
12635    ///   associated with the same buffer collection as the calling `Node`.
12636    ///   Another reason for this error is if the `node_ref` is an
12637    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
12638    ///   a real `node_ref` obtained from `GetNodeRef`.
12639    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
12640    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle , or doesn't have
12641    ///   the needed rights expected on a real `node_ref`.
12642    /// * No other failing status codes are returned by this call.  However,
12643    ///   sysmem may add additional codes in future, so the client should have
12644    ///   sensible default handling for any failing status code.
12645    pub fn r#is_alternate_for(
12646        &self,
12647        mut payload: NodeIsAlternateForRequest,
12648        ___deadline: zx::MonotonicInstant,
12649    ) -> Result<NodeIsAlternateForResult, fidl::Error> {
12650        let _response = self.client.send_query::<
12651            NodeIsAlternateForRequest,
12652            fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
12653        >(
12654            &mut payload,
12655            0x3a58e00157e0825,
12656            fidl::encoding::DynamicFlags::FLEXIBLE,
12657            ___deadline,
12658        )?
12659        .into_result::<NodeMarker>("is_alternate_for")?;
12660        Ok(_response.map(|x| x))
12661    }
12662
12663    /// Get the buffer collection ID. This ID is also available from
12664    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
12665    /// within the collection).
12666    ///
12667    /// This call is mainly useful in situations where we can't convey a
12668    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
12669    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
12670    /// handle, which can be joined back up with a `BufferCollection` client end
12671    /// that was created via a different path. Prefer to convey a
12672    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
12673    ///
12674    /// Trusting a `buffer_collection_id` value from a source other than sysmem
12675    /// is analogous to trusting a koid value from a source other than zircon.
12676    /// Both should be avoided unless really necessary, and both require
12677    /// caution. In some situations it may be reasonable to refer to a
12678    /// pre-established `BufferCollection` by `buffer_collection_id` via a
12679    /// protocol for efficiency reasons, but an incoming value purporting to be
12680    /// a `buffer_collection_id` is not sufficient alone to justify granting the
12681    /// sender of the `buffer_collection_id` any capability. The sender must
12682    /// first prove to a receiver that the sender has/had a VMO or has/had a
12683    /// `BufferCollectionToken` to the same collection by sending a handle that
12684    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
12685    /// `buffer_collection_id` value. The receiver should take care to avoid
12686    /// assuming that a sender had a `BufferCollectionToken` in cases where the
12687    /// sender has only proven that the sender had a VMO.
12688    ///
12689    /// - response `buffer_collection_id` This ID is unique per buffer
12690    ///   collection per boot. Each buffer is uniquely identified by the
12691    ///   `buffer_collection_id` and `buffer_index` together.
12692    pub fn r#get_buffer_collection_id(
12693        &self,
12694        ___deadline: zx::MonotonicInstant,
12695    ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
12696        let _response = self.client.send_query::<
12697            fidl::encoding::EmptyPayload,
12698            fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
12699        >(
12700            (),
12701            0x77d19a494b78ba8c,
12702            fidl::encoding::DynamicFlags::FLEXIBLE,
12703            ___deadline,
12704        )?
12705        .into_result::<NodeMarker>("get_buffer_collection_id")?;
12706        Ok(_response)
12707    }
12708
12709    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
12710    /// created after this message to weak, which means that a client's `Node`
12711    /// client end (or a child created after this message) is not alone
12712    /// sufficient to keep allocated VMOs alive.
12713    ///
12714    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
12715    /// `close_weak_asap`.
12716    ///
12717    /// This message is only permitted before the `Node` becomes ready for
12718    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
12719    ///   * `BufferCollectionToken`: any time
12720    ///   * `BufferCollection`: before `SetConstraints`
12721    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
12722    ///
12723    /// Currently, no conversion from strong `Node` to weak `Node` after ready
12724    /// for allocation is provided, but a client can simulate that by creating
12725    /// an additional `Node` before allocation and setting that additional
12726    /// `Node` to weak, and then potentially at some point later sending
12727    /// `Release` and closing the client end of the client's strong `Node`, but
12728    /// keeping the client's weak `Node`.
12729    ///
12730    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
12731    /// collection failure (all `Node` client end(s) will see
12732    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
12733    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
12734    /// this situation until all `Node`(s) are ready for allocation. For initial
12735    /// allocation to succeed, at least one strong `Node` is required to exist
12736    /// at allocation time, but after that client receives VMO handles, that
12737    /// client can `BufferCollection.Release` and close the client end without
12738    /// causing this type of failure.
12739    ///
12740    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
12741    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
12742    /// separately as appropriate.
12743    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
12744        self.client.send::<fidl::encoding::EmptyPayload>(
12745            (),
12746            0x22dd3ea514eeffe1,
12747            fidl::encoding::DynamicFlags::FLEXIBLE,
12748        )
12749    }
12750
12751    /// This indicates to sysmem that the client is prepared to pay attention to
12752    /// `close_weak_asap`.
12753    ///
12754    /// If sent, this message must be before
12755    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
12756    ///
12757    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
12758    /// send this message before `WaitForAllBuffersAllocated`, or a parent
12759    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
12760    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
12761    /// trigger buffer collection failure.
12762    ///
12763    /// This message is necessary because weak sysmem VMOs have not always been
12764    /// a thing, so older clients are not aware of the need to pay attention to
12765    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
12766    /// sysmem weak VMO handles asap. By having this message and requiring
12767    /// participants to indicate their acceptance of this aspect of the overall
12768    /// protocol, we avoid situations where an older client is delivered a weak
12769    /// VMO without any way for sysmem to get that VMO to close quickly later
12770    /// (and on a per-buffer basis).
12771    ///
12772    /// A participant that doesn't handle `close_weak_asap` and also doesn't
12773    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
12774    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
12775    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
12776    /// same participant has a child/delegate which does retrieve VMOs, that
12777    /// child/delegate will need to send `SetWeakOk` before
12778    /// `WaitForAllBuffersAllocated`.
12779    ///
12780    /// + request `for_child_nodes_also` If present and true, this means direct
12781    ///   child nodes of this node created after this message plus all
12782    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
12783    ///   those nodes. Any child node of this node that was created before this
12784    ///   message is not included. This setting is "sticky" in the sense that a
12785    ///   subsequent `SetWeakOk` without this bool set to true does not reset
12786    ///   the server-side bool. If this creates a problem for a participant, a
12787    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
12788    ///   tokens instead, as appropriate. A participant should only set
12789    ///   `for_child_nodes_also` true if the participant can really promise to
12790    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
12791    ///   weak VMO handles held by participants holding the corresponding child
12792    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
12793    ///   which are using sysmem(1) can be weak, despite the clients of those
12794    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
12795    ///   direct way to find out about `close_weak_asap`. This only applies to
12796    ///   descendents of this `Node` which are using sysmem(1), not to this
12797    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
12798    ///   token, which will fail allocation unless an ancestor of this `Node`
12799    ///   specified `for_child_nodes_also` true.
12800    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
12801        self.client.send::<NodeSetWeakOkRequest>(
12802            &mut payload,
12803            0x38a44fc4d7724be9,
12804            fidl::encoding::DynamicFlags::FLEXIBLE,
12805        )
12806    }
12807
12808    /// The server_end will be closed after this `Node` and any child nodes have
12809    /// have released their buffer counts, making those counts available for
12810    /// reservation by a different `Node` via
12811    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
12812    ///
12813    /// The `Node` buffer counts may not be released until the entire tree of
12814    /// `Node`(s) is closed or failed, because
12815    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
12816    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
12817    /// `Node` buffer counts remain reserved until the orphaned node is later
12818    /// cleaned up.
12819    ///
12820    /// If the `Node` exceeds a fairly large number of attached eventpair server
12821    /// ends, a log message will indicate this and the `Node` (and the
12822    /// appropriate) sub-tree will fail.
12823    ///
12824    /// The `server_end` will remain open when
12825    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
12826    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
12827    /// [`fuchsia.sysmem2/BufferCollection`].
12828    ///
12829    /// This message can also be used with a
12830    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
12831    pub fn r#attach_node_tracking(
12832        &self,
12833        mut payload: NodeAttachNodeTrackingRequest,
12834    ) -> Result<(), fidl::Error> {
12835        self.client.send::<NodeAttachNodeTrackingRequest>(
12836            &mut payload,
12837            0x3f22f2a293d3cdac,
12838            fidl::encoding::DynamicFlags::FLEXIBLE,
12839        )
12840    }
12841}
12842
#[cfg(target_os = "fuchsia")]
impl From<NodeSynchronousProxy> for zx::Handle {
    /// Consumes the proxy, recovering its underlying channel as a generic
    /// Zircon handle.
    fn from(value: NodeSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
12849
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for NodeSynchronousProxy {
    /// Wraps a raw FIDL channel in a synchronous proxy for
    /// `fuchsia.sysmem2/Node`.
    fn from(value: fidl::Channel) -> Self {
        NodeSynchronousProxy::new(value)
    }
}
12856
/// Asynchronous client proxy for the `fuchsia.sysmem2/Node` protocol.
#[derive(Debug, Clone)]
pub struct NodeProxy {
    // Async FIDL client that encodes/decodes messages on the channel.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
12861
12862impl fidl::endpoints::Proxy for NodeProxy {
12863    type Protocol = NodeMarker;
12864
12865    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
12866        Self::new(inner)
12867    }
12868
12869    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
12870        self.client.into_channel().map_err(|client| Self { client })
12871    }
12872
12873    fn as_channel(&self) -> &::fidl::AsyncChannel {
12874        self.client.as_channel()
12875    }
12876}
12877
12878impl NodeProxy {
12879    /// Create a new Proxy for fuchsia.sysmem2/Node.
12880    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
12881        let protocol_name = <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
12882        Self { client: fidl::client::Client::new(channel, protocol_name) }
12883    }
12884
12885    /// Get a Stream of events from the remote end of the protocol.
12886    ///
12887    /// # Panics
12888    ///
12889    /// Panics if the event stream was already taken.
12890    pub fn take_event_stream(&self) -> NodeEventStream {
12891        NodeEventStream { event_receiver: self.client.take_event_receiver() }
12892    }
12893
12894    /// Ensure that previous messages have been received server side. This is
12895    /// particularly useful after previous messages that created new tokens,
12896    /// because a token must be known to the sysmem server before sending the
12897    /// token to another participant.
12898    ///
12899    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
12900    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
12901    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
12902    /// to mitigate the possibility of a hostile/fake
12903    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
12904    /// Another way is to pass the token to
12905    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
12906    /// the token as part of exchanging it for a
12907    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
12908    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
12909    /// of stalling.
12910    ///
12911    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
12912    /// and then starting and completing a `Sync`, it's then safe to send the
12913    /// `BufferCollectionToken` client ends to other participants knowing the
12914    /// server will recognize the tokens when they're sent by the other
12915    /// participants to sysmem in a
12916    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
12917    /// efficient way to create tokens while avoiding unnecessary round trips.
12918    ///
12919    /// Other options include waiting for each
12920    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
12921    /// individually (using separate call to `Sync` after each), or calling
12922    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
12923    /// converted to a `BufferCollection` via
12924    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
12925    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
12926    /// the sync step and can create multiple tokens at once.
12927    pub fn r#sync(
12928        &self,
12929    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
12930        NodeProxyInterface::r#sync(self)
12931    }
12932
12933    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
12934    ///
12935    /// Normally a participant will convert a `BufferCollectionToken` into a
12936    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
12937    /// `Release` via the token (and then close the channel immediately or
12938    /// shortly later in response to server closing the server end), which
12939    /// avoids causing buffer collection failure. Without a prior `Release`,
12940    /// closing the `BufferCollectionToken` client end will cause buffer
12941    /// collection failure.
12942    ///
12943    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
12944    ///
12945    /// By default the server handles unexpected closure of a
12946    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
12947    /// first) by failing the buffer collection. Partly this is to expedite
12948    /// closing VMO handles to reclaim memory when any participant fails. If a
12949    /// participant would like to cleanly close a `BufferCollection` without
12950    /// causing buffer collection failure, the participant can send `Release`
12951    /// before closing the `BufferCollection` client end. The `Release` can
12952    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
12953    /// buffer collection won't require constraints from this node in order to
12954    /// allocate. If after `SetConstraints`, the constraints are retained and
12955    /// aggregated, despite the lack of `BufferCollection` connection at the
12956    /// time of constraints aggregation.
12957    ///
12958    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
12959    ///
12960    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
12961    /// end (without `Release` first) will trigger failure of the buffer
12962    /// collection. To close a `BufferCollectionTokenGroup` channel without
12963    /// failing the buffer collection, ensure that AllChildrenPresent() has been
12964    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
12965    /// client end.
12966    ///
12967    /// If `Release` occurs before
12968    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent], the
12969    /// buffer collection will fail (triggered by reception of `Release` without
12970    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
12971    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
12972    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
12973    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
12974    /// close requires `AllChildrenPresent` (if not already sent), then
12975    /// `Release`, then close client end.
12976    ///
12977    /// If `Release` occurs after `AllChildrenPresent`, the children and all
12978    /// their constraints remain intact (just as they would if the
12979    /// `BufferCollectionTokenGroup` channel had remained open), and the client
12980    /// end close doesn't trigger buffer collection failure.
12981    ///
12982    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
12983    ///
12984    /// For brevity, the per-channel-protocol paragraphs above ignore the
12985    /// separate failure domain created by
12986    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
12987    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
12988    /// unexpectedly closes (without `Release` first) and that client end is
12989    /// under a failure domain, instead of failing the whole buffer collection,
12990    /// the failure domain is failed, but the buffer collection itself is
12991    /// isolated from failure of the failure domain. Such failure domains can be
12992    /// nested, in which case only the inner-most failure domain in which the
12993    /// `Node` resides fails.
12994    pub fn r#release(&self) -> Result<(), fidl::Error> {
12995        NodeProxyInterface::r#release(self)
12996    }
12997
12998    /// Set a name for VMOs in this buffer collection.
12999    ///
13000    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
13001    /// will be truncated to fit. The name of the vmo will be suffixed with the
13002    /// buffer index within the collection (if the suffix fits within
13003    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
13004    /// listed in the inspect data.
13005    ///
13006    /// The name only affects VMOs allocated after the name is set; this call
13007    /// does not rename existing VMOs. If multiple clients set different names
13008    /// then the larger priority value will win. Setting a new name with the
13009    /// same priority as a prior name doesn't change the name.
13010    ///
13011    /// All table fields are currently required.
13012    ///
13013    /// + request `priority` The name is only set if this is the first `SetName`
13014    ///   or if `priority` is greater than any previous `priority` value in
13015    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
13016    /// + request `name` The name for VMOs created under this buffer collection.
13017    pub fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
13018        NodeProxyInterface::r#set_name(self, payload)
13019    }
13020
13021    /// Set information about the current client that can be used by sysmem to
13022    /// help diagnose leaking memory and allocation stalls waiting for a
13023    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
13024    ///
13025    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
13026    /// `Node`(s) derived from this `Node`, unless overriden by
13027    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
13028    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
13029    ///
13030    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
13031    /// `Allocator` is the most efficient way to ensure that all
13032    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
13033    /// set, and is also more efficient than separately sending the same debug
13034    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
13035    /// created [`fuchsia.sysmem2/Node`].
13036    ///
13037    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
13038    /// indicate which client is closing their channel first, leading to subtree
13039    /// failure (which can be normal if the purpose of the subtree is over, but
13040    /// if happening earlier than expected, the client-channel-specific name can
13041    /// help diagnose where the failure is first coming from, from sysmem's
13042    /// point of view).
13043    ///
13044    /// All table fields are currently required.
13045    ///
13046    /// + request `name` This can be an arbitrary string, but the current
13047    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
13048    /// + request `id` This can be an arbitrary id, but the current process ID
13049    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
13050    pub fn r#set_debug_client_info(
13051        &self,
13052        mut payload: &NodeSetDebugClientInfoRequest,
13053    ) -> Result<(), fidl::Error> {
13054        NodeProxyInterface::r#set_debug_client_info(self, payload)
13055    }
13056
13057    /// Sysmem logs a warning if sysmem hasn't seen
13058    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
13059    /// within 5 seconds after creation of a new collection.
13060    ///
13061    /// Clients can call this method to change when the log is printed. If
13062    /// multiple client set the deadline, it's unspecified which deadline will
13063    /// take effect.
13064    ///
13065    /// In most cases the default works well.
13066    ///
13067    /// All table fields are currently required.
13068    ///
13069    /// + request `deadline` The time at which sysmem will start trying to log
13070    ///   the warning, unless all constraints are with sysmem by then.
13071    pub fn r#set_debug_timeout_log_deadline(
13072        &self,
13073        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
13074    ) -> Result<(), fidl::Error> {
13075        NodeProxyInterface::r#set_debug_timeout_log_deadline(self, payload)
13076    }
13077
13078    /// This enables verbose logging for the buffer collection.
13079    ///
13080    /// Verbose logging includes constraints set via
13081    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
13082    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
13083    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
13084    /// the tree of `Node`(s).
13085    ///
13086    /// Normally sysmem prints only a single line complaint when aggregation
13087    /// fails, with just the specific detailed reason that aggregation failed,
13088    /// with little surrounding context.  While this is often enough to diagnose
13089    /// a problem if only a small change was made and everything was working
13090    /// before the small change, it's often not particularly helpful for getting
13091    /// a new buffer collection to work for the first time.  Especially with
13092    /// more complex trees of nodes, involving things like
13093    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
13094    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
13095    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
13096    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
13097    /// looks like and why it's failing a logical allocation, or why a tree or
13098    /// subtree is failing sooner than expected.
13099    ///
13100    /// The intent of the extra logging is to be acceptable from a performance
13101    /// point of view, under the assumption that verbose logging is only enabled
13102    /// on a low number of buffer collections. If we're not tracking down a bug,
13103    /// we shouldn't send this message.
13104    pub fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
13105        NodeProxyInterface::r#set_verbose_logging(self)
13106    }
13107
13108    /// This gets a handle that can be used as a parameter to
13109    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
13110    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
13111    /// client obtained this handle from this `Node`.
13112    ///
13113    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
13114    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
13115    /// despite the two calls typically being on different channels.
13116    ///
13117    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
13118    ///
13119    /// All table fields are currently required.
13120    ///
13121    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
13122    ///   different `Node` channel, to prove that the client obtained the handle
13123    ///   from this `Node`.
13124    pub fn r#get_node_ref(
13125        &self,
13126    ) -> fidl::client::QueryResponseFut<
13127        NodeGetNodeRefResponse,
13128        fidl::encoding::DefaultFuchsiaResourceDialect,
13129    > {
13130        NodeProxyInterface::r#get_node_ref(self)
13131    }
13132
13133    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
13134    /// rooted at a different child token of a common parent
13135    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
13136    /// passed-in `node_ref`.
13137    ///
13138    /// This call is for assisting with admission control de-duplication, and
13139    /// with debugging.
13140    ///
13141    /// The `node_ref` must be obtained using
13142    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
13143    ///
13144    /// The `node_ref` can be a duplicated handle; it's not necessary to call
13145    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
13146    ///
13147    /// If a calling token may not actually be a valid token at all due to a
13148    /// potentially hostile/untrusted provider of the token, call
13149    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
13150    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
13151    /// never responds due to a calling token not being a real token (not really
13152    /// talking to sysmem).  Another option is to call
13153    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
13154    /// which also validates the token along with converting it to a
13155    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
13156    ///
13157    /// All table fields are currently required.
13158    ///
13159    /// - response `is_alternate`
13160    ///   - true: The first parent node in common between the calling node and
13161    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
13162    ///     that the calling `Node` and the `node_ref` `Node` will not have both
13163    ///     their constraints apply - rather sysmem will choose one or the other
13164    ///     of the constraints - never both.  This is because only one child of
13165    ///     a `BufferCollectionTokenGroup` is selected during logical
13166    ///     allocation, with only that one child's subtree contributing to
13167    ///     constraints aggregation.
13168    ///   - false: The first parent node in common between the calling `Node`
13169    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
13170    ///     Currently, this means the first parent node in common is a
13171    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
13172    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
13173    ///     `Node` may have both their constraints apply during constraints
13174    ///     aggregation of the logical allocation, if both `Node`(s) are
13175    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
13176    ///     this case, there is no `BufferCollectionTokenGroup` that will
13177    ///     directly prevent the two `Node`(s) from both being selected and
13178    ///     their constraints both aggregated, but even when false, one or both
13179    ///     `Node`(s) may still be eliminated from consideration if one or both
13180    ///     `Node`(s) has a direct or indirect parent
13181    ///     `BufferCollectionTokenGroup` which selects a child subtree other
13182    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
13183    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
13184    ///   associated with the same buffer collection as the calling `Node`.
13185    ///   Another reason for this error is if the `node_ref` is an
13186    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
13187    ///   a real `node_ref` obtained from `GetNodeRef`.
13188    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle.EVENT`] handle, or doesn't have
13190    ///   the needed rights expected on a real `node_ref`.
13191    /// * No other failing status codes are returned by this call.  However,
13192    ///   sysmem may add additional codes in future, so the client should have
13193    ///   sensible default handling for any failing status code.
13194    pub fn r#is_alternate_for(
13195        &self,
13196        mut payload: NodeIsAlternateForRequest,
13197    ) -> fidl::client::QueryResponseFut<
13198        NodeIsAlternateForResult,
13199        fidl::encoding::DefaultFuchsiaResourceDialect,
13200    > {
13201        NodeProxyInterface::r#is_alternate_for(self, payload)
13202    }
13203
13204    /// Get the buffer collection ID. This ID is also available from
13205    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
13206    /// within the collection).
13207    ///
13208    /// This call is mainly useful in situations where we can't convey a
13209    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
13210    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
13211    /// handle, which can be joined back up with a `BufferCollection` client end
13212    /// that was created via a different path. Prefer to convey a
13213    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
13214    ///
13215    /// Trusting a `buffer_collection_id` value from a source other than sysmem
13216    /// is analogous to trusting a koid value from a source other than zircon.
13217    /// Both should be avoided unless really necessary, and both require
13218    /// caution. In some situations it may be reasonable to refer to a
13219    /// pre-established `BufferCollection` by `buffer_collection_id` via a
13220    /// protocol for efficiency reasons, but an incoming value purporting to be
13221    /// a `buffer_collection_id` is not sufficient alone to justify granting the
13222    /// sender of the `buffer_collection_id` any capability. The sender must
13223    /// first prove to a receiver that the sender has/had a VMO or has/had a
13224    /// `BufferCollectionToken` to the same collection by sending a handle that
13225    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
13226    /// `buffer_collection_id` value. The receiver should take care to avoid
13227    /// assuming that a sender had a `BufferCollectionToken` in cases where the
13228    /// sender has only proven that the sender had a VMO.
13229    ///
13230    /// - response `buffer_collection_id` This ID is unique per buffer
13231    ///   collection per boot. Each buffer is uniquely identified by the
13232    ///   `buffer_collection_id` and `buffer_index` together.
13233    pub fn r#get_buffer_collection_id(
13234        &self,
13235    ) -> fidl::client::QueryResponseFut<
13236        NodeGetBufferCollectionIdResponse,
13237        fidl::encoding::DefaultFuchsiaResourceDialect,
13238    > {
13239        NodeProxyInterface::r#get_buffer_collection_id(self)
13240    }
13241
13242    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
13243    /// created after this message to weak, which means that a client's `Node`
13244    /// client end (or a child created after this message) is not alone
13245    /// sufficient to keep allocated VMOs alive.
13246    ///
13247    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
13248    /// `close_weak_asap`.
13249    ///
13250    /// This message is only permitted before the `Node` becomes ready for
13251    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
13252    ///   * `BufferCollectionToken`: any time
13253    ///   * `BufferCollection`: before `SetConstraints`
13254    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
13255    ///
13256    /// Currently, no conversion from strong `Node` to weak `Node` after ready
13257    /// for allocation is provided, but a client can simulate that by creating
13258    /// an additional `Node` before allocation and setting that additional
13259    /// `Node` to weak, and then potentially at some point later sending
13260    /// `Release` and closing the client end of the client's strong `Node`, but
13261    /// keeping the client's weak `Node`.
13262    ///
13263    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
13264    /// collection failure (all `Node` client end(s) will see
13265    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
13266    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
13267    /// this situation until all `Node`(s) are ready for allocation. For initial
13268    /// allocation to succeed, at least one strong `Node` is required to exist
13269    /// at allocation time, but after that client receives VMO handles, that
13270    /// client can `BufferCollection.Release` and close the client end without
13271    /// causing this type of failure.
13272    ///
13273    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
13274    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
13275    /// separately as appropriate.
13276    pub fn r#set_weak(&self) -> Result<(), fidl::Error> {
13277        NodeProxyInterface::r#set_weak(self)
13278    }
13279
13280    /// This indicates to sysmem that the client is prepared to pay attention to
13281    /// `close_weak_asap`.
13282    ///
13283    /// If sent, this message must be before
13284    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
13285    ///
13286    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
13287    /// send this message before `WaitForAllBuffersAllocated`, or a parent
13288    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
13289    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
13290    /// trigger buffer collection failure.
13291    ///
13292    /// This message is necessary because weak sysmem VMOs have not always been
13293    /// a thing, so older clients are not aware of the need to pay attention to
13294    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
13295    /// sysmem weak VMO handles asap. By having this message and requiring
13296    /// participants to indicate their acceptance of this aspect of the overall
13297    /// protocol, we avoid situations where an older client is delivered a weak
13298    /// VMO without any way for sysmem to get that VMO to close quickly later
13299    /// (and on a per-buffer basis).
13300    ///
13301    /// A participant that doesn't handle `close_weak_asap` and also doesn't
13302    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
13303    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
13304    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
13305    /// same participant has a child/delegate which does retrieve VMOs, that
13306    /// child/delegate will need to send `SetWeakOk` before
13307    /// `WaitForAllBuffersAllocated`.
13308    ///
13309    /// + request `for_child_nodes_also` If present and true, this means direct
13310    ///   child nodes of this node created after this message plus all
13311    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
13312    ///   those nodes. Any child node of this node that was created before this
13313    ///   message is not included. This setting is "sticky" in the sense that a
13314    ///   subsequent `SetWeakOk` without this bool set to true does not reset
13315    ///   the server-side bool. If this creates a problem for a participant, a
13316    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
13317    ///   tokens instead, as appropriate. A participant should only set
13318    ///   `for_child_nodes_also` true if the participant can really promise to
13319    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
13320    ///   weak VMO handles held by participants holding the corresponding child
13321    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
13322    ///   which are using sysmem(1) can be weak, despite the clients of those
13323    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
13324    ///   direct way to find out about `close_weak_asap`. This only applies to
13325    ///   descendents of this `Node` which are using sysmem(1), not to this
13326    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
13327    ///   token, which will fail allocation unless an ancestor of this `Node`
13328    ///   specified `for_child_nodes_also` true.
13329    pub fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
13330        NodeProxyInterface::r#set_weak_ok(self, payload)
13331    }
13332
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
13335    /// reservation by a different `Node` via
13336    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
13337    ///
13338    /// The `Node` buffer counts may not be released until the entire tree of
13339    /// `Node`(s) is closed or failed, because
13340    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
13341    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
13342    /// `Node` buffer counts remain reserved until the orphaned node is later
13343    /// cleaned up.
13344    ///
13345    /// If the `Node` exceeds a fairly large number of attached eventpair server
13346    /// ends, a log message will indicate this and the `Node` (and the
13347    /// appropriate) sub-tree will fail.
13348    ///
13349    /// The `server_end` will remain open when
13350    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
13351    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
13352    /// [`fuchsia.sysmem2/BufferCollection`].
13353    ///
13354    /// This message can also be used with a
13355    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
13356    pub fn r#attach_node_tracking(
13357        &self,
13358        mut payload: NodeAttachNodeTrackingRequest,
13359    ) -> Result<(), fidl::Error> {
13360        NodeProxyInterface::r#attach_node_tracking(self, payload)
13361    }
13362}
13363
// Client-side wire implementation of the `Node` protocol for the async proxy.
//
// One-way methods are fire-and-forget `client.send` calls; two-way methods
// return a `QueryResponseFut` built by `send_query_and_decode`, each with a
// local `_decode` helper that turns the raw reply buffer into the typed
// response. Every message carries its generated 64-bit method ordinal and the
// FLEXIBLE dynamic flag. The ordinal passed to the decoder must match the one
// used on send; do not edit these constants by hand.
impl NodeProxyInterface for NodeProxy {
    type SyncResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    // Two-way `Sync` (ordinal 0x11ac2555cf575b54): empty request, empty
    // flexible response.
    fn r#sync(&self) -> Self::SyncResponseFut {
        // `into_result` unwraps the flexible result envelope, surfacing an
        // unknown-method error if the server didn't recognize the ordinal.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x11ac2555cf575b54,
            >(_buf?)?
            .into_result::<NodeMarker>("sync")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, ()>(
            (),
            0x11ac2555cf575b54,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `Release` (ordinal 0x6a5cae7d6d6e04c6): no payload.
    fn r#release(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x6a5cae7d6d6e04c6,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetName` (ordinal 0xb41f1624f48c1e9): request table is a value
    // type, so it is sent by reference.
    fn r#set_name(&self, mut payload: &NodeSetNameRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetNameRequest>(
            payload,
            0xb41f1624f48c1e9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugClientInfo` (ordinal 0x5cde8914608d99b1).
    fn r#set_debug_client_info(
        &self,
        mut payload: &NodeSetDebugClientInfoRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugClientInfoRequest>(
            payload,
            0x5cde8914608d99b1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetDebugTimeoutLogDeadline` (ordinal 0x716b0af13d5c0806).
    fn r#set_debug_timeout_log_deadline(
        &self,
        mut payload: &NodeSetDebugTimeoutLogDeadlineRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetDebugTimeoutLogDeadlineRequest>(
            payload,
            0x716b0af13d5c0806,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetVerboseLogging` (ordinal 0x5209c77415b4dfad): no payload.
    fn r#set_verbose_logging(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x5209c77415b4dfad,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    type GetNodeRefResponseFut = fidl::client::QueryResponseFut<
        NodeGetNodeRefResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetNodeRef` (ordinal 0x5b3d0e51614df053): empty request,
    // response table carries the node_ref event handle.
    fn r#get_node_ref(&self) -> Self::GetNodeRefResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetNodeRefResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetNodeRefResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b3d0e51614df053,
            >(_buf?)?
            .into_result::<NodeMarker>("get_node_ref")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, NodeGetNodeRefResponse>(
            (),
            0x5b3d0e51614df053,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type IsAlternateForResponseFut = fidl::client::QueryResponseFut<
        NodeIsAlternateForResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `IsAlternateFor` (ordinal 0x3a58e00157e0825): resource-type
    // request (contains a handle), so it is encoded from `&mut payload`; the
    // reply is a flexible result of response-or-`Error`.
    fn r#is_alternate_for(
        &self,
        mut payload: NodeIsAlternateForRequest,
    ) -> Self::IsAlternateForResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeIsAlternateForResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<NodeIsAlternateForResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3a58e00157e0825,
            >(_buf?)?
            .into_result::<NodeMarker>("is_alternate_for")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<NodeIsAlternateForRequest, NodeIsAlternateForResult>(
            &mut payload,
            0x3a58e00157e0825,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetBufferCollectionIdResponseFut = fidl::client::QueryResponseFut<
        NodeGetBufferCollectionIdResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    // Two-way `GetBufferCollectionId` (ordinal 0x77d19a494b78ba8c): empty
    // request, response carries the per-boot collection ID.
    fn r#get_buffer_collection_id(&self) -> Self::GetBufferCollectionIdResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<NodeGetBufferCollectionIdResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x77d19a494b78ba8c,
            >(_buf?)?
            .into_result::<NodeMarker>("get_buffer_collection_id")?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            NodeGetBufferCollectionIdResponse,
        >(
            (),
            0x77d19a494b78ba8c,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    // One-way `SetWeak` (ordinal 0x22dd3ea514eeffe1): no payload.
    fn r#set_weak(&self) -> Result<(), fidl::Error> {
        self.client.send::<fidl::encoding::EmptyPayload>(
            (),
            0x22dd3ea514eeffe1,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `SetWeakOk` (ordinal 0x38a44fc4d7724be9): resource-type request,
    // encoded from `&mut payload`.
    fn r#set_weak_ok(&self, mut payload: NodeSetWeakOkRequest) -> Result<(), fidl::Error> {
        self.client.send::<NodeSetWeakOkRequest>(
            &mut payload,
            0x38a44fc4d7724be9,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }

    // One-way `AttachNodeTracking` (ordinal 0x3f22f2a293d3cdac): resource-type
    // request (carries the tracking eventpair server end).
    fn r#attach_node_tracking(
        &self,
        mut payload: NodeAttachNodeTrackingRequest,
    ) -> Result<(), fidl::Error> {
        self.client.send::<NodeAttachNodeTrackingRequest>(
            &mut payload,
            0x3f22f2a293d3cdac,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
13538
/// Stream of events arriving on a `Node` client channel.
pub struct NodeEventStream {
    // Receives raw message buffers from the proxy's channel; they are decoded
    // into `NodeEvent`s by this stream's `poll_next`.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
13542
// Explicitly assert `Unpin` so the stream can be polled without pinning
// gymnastics (see the `poll_next_unpin` call below).
impl std::marker::Unpin for NodeEventStream {}
13544
impl futures::stream::FusedStream for NodeEventStream {
    /// The event stream is terminated exactly when its underlying receiver is.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
13550
impl futures::Stream for NodeEventStream {
    type Item = Result<NodeEvent, fidl::Error>;

    /// Polls the inner event receiver; a received message buffer is decoded
    /// into a [`NodeEvent`], and `None` is yielded once the receiver is
    /// exhausted. Receiver errors are propagated via the `?` inside `ready!`.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(NodeEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
13567
/// Events delivered on a `Node` client channel. The protocol currently
/// declares no events of its own, so the only variant is the catch-all for
/// flexible events with an unrecognized (e.g. newer) ordinal.
#[derive(Debug)]
pub enum NodeEvent {
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
13576
impl NodeEvent {
    /// Decodes a message buffer as a [`NodeEvent`].
    ///
    /// Events carry `tx_id == 0` (checked in debug builds only). Since this
    /// protocol defines no events, any message marked FLEXIBLE is surfaced as
    /// [`NodeEvent::_UnknownEvent`], while a strict message with an
    /// unrecognized ordinal is a protocol error.
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<NodeEvent, fidl::Error> {
        // Split bytes from handles; no recognized event here uses handles.
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
                Ok(NodeEvent::_UnknownEvent { ordinal: tx_header.ordinal })
            }
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
13596
/// A Stream of incoming requests for fuchsia.sysmem2/Node.
pub struct NodeRequestStream {
    // Shared serve state (channel plus shutdown bookkeeping), also cloned into
    // every control handle handed out for this connection.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shutdown is observed; polling the stream
    // again after that panics (see the `Stream` impl).
    is_terminated: bool,
}
13602
// Explicitly assert `Unpin` so the request stream can be polled through a
// plain `&mut` reference.
impl std::marker::Unpin for NodeRequestStream {}
13604
impl futures::stream::FusedStream for NodeRequestStream {
    /// True once the request channel has closed or server shutdown has been
    /// observed; after that the stream yields nothing further.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
13610
impl fidl::endpoints::RequestStream for NodeRequestStream {
    type Protocol = NodeMarker;
    type ControlHandle = NodeControlHandle;

    /// Wraps a server-end async channel in a fresh, non-terminated stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Returns a control handle sharing this connection's serve state.
    fn control_handle(&self) -> Self::ControlHandle {
        NodeControlHandle { inner: self.inner.clone() }
    }

    /// Decomposes the stream into its shared serve state and termination flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Inverse of [`into_inner`]: rebuilds a stream from previously extracted
    /// parts.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
13637
13638impl futures::Stream for NodeRequestStream {
13639    type Item = Result<NodeRequest, fidl::Error>;
13640
13641    fn poll_next(
13642        mut self: std::pin::Pin<&mut Self>,
13643        cx: &mut std::task::Context<'_>,
13644    ) -> std::task::Poll<Option<Self::Item>> {
13645        let this = &mut *self;
13646        if this.inner.check_shutdown(cx) {
13647            this.is_terminated = true;
13648            return std::task::Poll::Ready(None);
13649        }
13650        if this.is_terminated {
13651            panic!("polled NodeRequestStream after completion");
13652        }
13653        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
13654            |bytes, handles| {
13655                match this.inner.channel().read_etc(cx, bytes, handles) {
13656                    std::task::Poll::Ready(Ok(())) => {}
13657                    std::task::Poll::Pending => return std::task::Poll::Pending,
13658                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
13659                        this.is_terminated = true;
13660                        return std::task::Poll::Ready(None);
13661                    }
13662                    std::task::Poll::Ready(Err(e)) => {
13663                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
13664                            e.into(),
13665                        ))))
13666                    }
13667                }
13668
13669                // A message has been received from the channel
13670                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
13671
13672                std::task::Poll::Ready(Some(match header.ordinal {
13673                    0x11ac2555cf575b54 => {
13674                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13675                        let mut req = fidl::new_empty!(
13676                            fidl::encoding::EmptyPayload,
13677                            fidl::encoding::DefaultFuchsiaResourceDialect
13678                        );
13679                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13680                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13681                        Ok(NodeRequest::Sync {
13682                            responder: NodeSyncResponder {
13683                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13684                                tx_id: header.tx_id,
13685                            },
13686                        })
13687                    }
13688                    0x6a5cae7d6d6e04c6 => {
13689                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13690                        let mut req = fidl::new_empty!(
13691                            fidl::encoding::EmptyPayload,
13692                            fidl::encoding::DefaultFuchsiaResourceDialect
13693                        );
13694                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13695                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13696                        Ok(NodeRequest::Release { control_handle })
13697                    }
13698                    0xb41f1624f48c1e9 => {
13699                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13700                        let mut req = fidl::new_empty!(
13701                            NodeSetNameRequest,
13702                            fidl::encoding::DefaultFuchsiaResourceDialect
13703                        );
13704                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetNameRequest>(&header, _body_bytes, handles, &mut req)?;
13705                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13706                        Ok(NodeRequest::SetName { payload: req, control_handle })
13707                    }
13708                    0x5cde8914608d99b1 => {
13709                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13710                        let mut req = fidl::new_empty!(
13711                            NodeSetDebugClientInfoRequest,
13712                            fidl::encoding::DefaultFuchsiaResourceDialect
13713                        );
13714                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugClientInfoRequest>(&header, _body_bytes, handles, &mut req)?;
13715                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13716                        Ok(NodeRequest::SetDebugClientInfo { payload: req, control_handle })
13717                    }
13718                    0x716b0af13d5c0806 => {
13719                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13720                        let mut req = fidl::new_empty!(
13721                            NodeSetDebugTimeoutLogDeadlineRequest,
13722                            fidl::encoding::DefaultFuchsiaResourceDialect
13723                        );
13724                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetDebugTimeoutLogDeadlineRequest>(&header, _body_bytes, handles, &mut req)?;
13725                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13726                        Ok(NodeRequest::SetDebugTimeoutLogDeadline { payload: req, control_handle })
13727                    }
13728                    0x5209c77415b4dfad => {
13729                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13730                        let mut req = fidl::new_empty!(
13731                            fidl::encoding::EmptyPayload,
13732                            fidl::encoding::DefaultFuchsiaResourceDialect
13733                        );
13734                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13735                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13736                        Ok(NodeRequest::SetVerboseLogging { control_handle })
13737                    }
13738                    0x5b3d0e51614df053 => {
13739                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13740                        let mut req = fidl::new_empty!(
13741                            fidl::encoding::EmptyPayload,
13742                            fidl::encoding::DefaultFuchsiaResourceDialect
13743                        );
13744                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13745                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13746                        Ok(NodeRequest::GetNodeRef {
13747                            responder: NodeGetNodeRefResponder {
13748                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13749                                tx_id: header.tx_id,
13750                            },
13751                        })
13752                    }
13753                    0x3a58e00157e0825 => {
13754                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13755                        let mut req = fidl::new_empty!(
13756                            NodeIsAlternateForRequest,
13757                            fidl::encoding::DefaultFuchsiaResourceDialect
13758                        );
13759                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeIsAlternateForRequest>(&header, _body_bytes, handles, &mut req)?;
13760                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13761                        Ok(NodeRequest::IsAlternateFor {
13762                            payload: req,
13763                            responder: NodeIsAlternateForResponder {
13764                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13765                                tx_id: header.tx_id,
13766                            },
13767                        })
13768                    }
13769                    0x77d19a494b78ba8c => {
13770                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
13771                        let mut req = fidl::new_empty!(
13772                            fidl::encoding::EmptyPayload,
13773                            fidl::encoding::DefaultFuchsiaResourceDialect
13774                        );
13775                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13776                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13777                        Ok(NodeRequest::GetBufferCollectionId {
13778                            responder: NodeGetBufferCollectionIdResponder {
13779                                control_handle: std::mem::ManuallyDrop::new(control_handle),
13780                                tx_id: header.tx_id,
13781                            },
13782                        })
13783                    }
13784                    0x22dd3ea514eeffe1 => {
13785                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13786                        let mut req = fidl::new_empty!(
13787                            fidl::encoding::EmptyPayload,
13788                            fidl::encoding::DefaultFuchsiaResourceDialect
13789                        );
13790                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
13791                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13792                        Ok(NodeRequest::SetWeak { control_handle })
13793                    }
13794                    0x38a44fc4d7724be9 => {
13795                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13796                        let mut req = fidl::new_empty!(
13797                            NodeSetWeakOkRequest,
13798                            fidl::encoding::DefaultFuchsiaResourceDialect
13799                        );
13800                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeSetWeakOkRequest>(&header, _body_bytes, handles, &mut req)?;
13801                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13802                        Ok(NodeRequest::SetWeakOk { payload: req, control_handle })
13803                    }
13804                    0x3f22f2a293d3cdac => {
13805                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
13806                        let mut req = fidl::new_empty!(
13807                            NodeAttachNodeTrackingRequest,
13808                            fidl::encoding::DefaultFuchsiaResourceDialect
13809                        );
13810                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<NodeAttachNodeTrackingRequest>(&header, _body_bytes, handles, &mut req)?;
13811                        let control_handle = NodeControlHandle { inner: this.inner.clone() };
13812                        Ok(NodeRequest::AttachNodeTracking { payload: req, control_handle })
13813                    }
13814                    _ if header.tx_id == 0
13815                        && header
13816                            .dynamic_flags()
13817                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13818                    {
13819                        Ok(NodeRequest::_UnknownMethod {
13820                            ordinal: header.ordinal,
13821                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13822                            method_type: fidl::MethodType::OneWay,
13823                        })
13824                    }
13825                    _ if header
13826                        .dynamic_flags()
13827                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
13828                    {
13829                        this.inner.send_framework_err(
13830                            fidl::encoding::FrameworkErr::UnknownMethod,
13831                            header.tx_id,
13832                            header.ordinal,
13833                            header.dynamic_flags(),
13834                            (bytes, handles),
13835                        )?;
13836                        Ok(NodeRequest::_UnknownMethod {
13837                            ordinal: header.ordinal,
13838                            control_handle: NodeControlHandle { inner: this.inner.clone() },
13839                            method_type: fidl::MethodType::TwoWay,
13840                        })
13841                    }
13842                    _ => Err(fidl::Error::UnknownOrdinal {
13843                        ordinal: header.ordinal,
13844                        protocol_name: <NodeMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
13845                    }),
13846                }))
13847            },
13848        )
13849    }
13850}
13851
/// This protocol is the parent protocol for all nodes in the tree established
/// by [`fuchsia.sysmem2/BufferCollectionToken`] creation and
/// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] creation, including
/// [`fuchsia.sysmem2/BufferCollectionToken`](s) which have since been converted
/// to a [`fuchsia.sysmem2/BufferCollection`] channel.
///
/// Epitaphs are not used in this protocol.
#[derive(Debug)]
pub enum NodeRequest {
    /// Ensure that previous messages have been received server side. This is
    /// particularly useful after previous messages that created new tokens,
    /// because a token must be known to the sysmem server before sending the
    /// token to another participant.
    ///
    /// Calling [`fuchsia.sysmem2/BufferCollectionToken.Sync`] on a token that
    /// isn't/wasn't a valid token risks the `Sync` stalling forever. See
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] for one way
    /// to mitigate the possibility of a hostile/fake
    /// [`fuchsia.sysmem2/BufferCollectionToken`] at the cost of one round trip.
    /// Another way is to pass the token to
    /// [`fuchsia.sysmem2/Allocator/BindSharedCollection`], which also validates
    /// the token as part of exchanging it for a
    /// [`fuchsia.sysmem2/BufferCollection`] channel, and
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] can then be used without risk
    /// of stalling.
    ///
    /// After creating one or more [`fuchsia.sysmem2/BufferCollectionToken`](s)
    /// and then starting and completing a `Sync`, it's then safe to send the
    /// `BufferCollectionToken` client ends to other participants knowing the
    /// server will recognize the tokens when they're sent by the other
    /// participants to sysmem in a
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] message. This is an
    /// efficient way to create tokens while avoiding unnecessary round trips.
    ///
    /// Other options include waiting for each
    /// [`fuchsia.sysmem2/BufferCollectionToken.Duplicate`] to complete
    /// individually (using separate call to `Sync` after each), or calling
    /// [`fuchsia.sysmem2/BufferCollection.Sync`] after a token has been
    /// converted to a `BufferCollection` via
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`], or using
    /// [`fuchsia.sysmem2/BufferCollectionToken.DuplicateSync`] which includes
    /// the sync step and can create multiple tokens at once.
    Sync { responder: NodeSyncResponder },
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionToken`] channel:
    ///
    /// Normally a participant will convert a `BufferCollectionToken` into a
    /// [`fuchsia.sysmem2/BufferCollection`], but a participant can instead send
    /// `Release` via the token (and then close the channel immediately or
    /// shortly later in response to server closing the server end), which
    /// avoids causing buffer collection failure. Without a prior `Release`,
    /// closing the `BufferCollectionToken` client end will cause buffer
    /// collection failure.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollection`] channel:
    ///
    /// By default the server handles unexpected closure of a
    /// [`fuchsia.sysmem2/BufferCollection`] client end (without `Release`
    /// first) by failing the buffer collection. Partly this is to expedite
    /// closing VMO handles to reclaim memory when any participant fails. If a
    /// participant would like to cleanly close a `BufferCollection` without
    /// causing buffer collection failure, the participant can send `Release`
    /// before closing the `BufferCollection` client end. The `Release` can
    /// occur before or after `SetConstraints`. If before `SetConstraints`, the
    /// buffer collection won't require constraints from this node in order to
    /// allocate. If after `SetConstraints`, the constraints are retained and
    /// aggregated, despite the lack of `BufferCollection` connection at the
    /// time of constraints aggregation.
    ///
    /// ###### On a [`fuchsia.sysmem2/BufferCollectionTokenGroup`] channel:
    ///
    /// By default, unexpected closure of a `BufferCollectionTokenGroup` client
    /// end (without `Release` first) will trigger failure of the buffer
    /// collection. To close a `BufferCollectionTokenGroup` channel without
    /// failing the buffer collection, ensure that AllChildrenPresent() has been
    /// sent, and send `Release` before closing the `BufferCollectionTokenGroup`
    /// client end.
    ///
    /// If `Release` occurs before
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup.AllChildrenPresent`], the
    /// buffer collection will fail (triggered by reception of `Release` without
    /// prior `AllChildrenPresent`). This is intentionally not analogous to how
    /// [`fuchsia.sysmem2/BufferCollection.Release`] without
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] first doesn't cause
    /// buffer collection failure. For a `BufferCollectionTokenGroup`, clean
    /// close requires `AllChildrenPresent` (if not already sent), then
    /// `Release`, then close client end.
    ///
    /// If `Release` occurs after `AllChildrenPresent`, the children and all
    /// their constraints remain intact (just as they would if the
    /// `BufferCollectionTokenGroup` channel had remained open), and the client
    /// end close doesn't trigger buffer collection failure.
    ///
    /// ###### On all [`fuchsia.sysmem2/Node`] channels (any of the above):
    ///
    /// For brevity, the per-channel-protocol paragraphs above ignore the
    /// separate failure domain created by
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`] or
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`]. When a client end
    /// unexpectedly closes (without `Release` first) and that client end is
    /// under a failure domain, instead of failing the whole buffer collection,
    /// the failure domain is failed, but the buffer collection itself is
    /// isolated from failure of the failure domain. Such failure domains can be
    /// nested, in which case only the inner-most failure domain in which the
    /// `Node` resides fails.
    Release { control_handle: NodeControlHandle },
    /// Set a name for VMOs in this buffer collection.
    ///
    /// If the name doesn't fit in ZX_MAX_NAME_LEN, the name of the vmo itself
    /// will be truncated to fit. The name of the vmo will be suffixed with the
    /// buffer index within the collection (if the suffix fits within
    /// ZX_MAX_NAME_LEN). The name specified here (without truncation) will be
    /// listed in the inspect data.
    ///
    /// The name only affects VMOs allocated after the name is set; this call
    /// does not rename existing VMOs. If multiple clients set different names
    /// then the larger priority value will win. Setting a new name with the
    /// same priority as a prior name doesn't change the name.
    ///
    /// All table fields are currently required.
    ///
    /// + request `priority` The name is only set if this is the first `SetName`
    ///   or if `priority` is greater than any previous `priority` value in
    ///   prior `SetName` calls across all `Node`(s) of this buffer collection.
    /// + request `name` The name for VMOs created under this buffer collection.
    SetName { payload: NodeSetNameRequest, control_handle: NodeControlHandle },
    /// Set information about the current client that can be used by sysmem to
    /// help diagnose leaking memory and allocation stalls waiting for a
    /// participant to send [`fuchsia.sysmem2/BufferCollection.SetConstraints`].
    ///
    /// This sets the debug client info on this [`fuchsia.sysmem2/Node`] and all
    /// `Node`(s) derived from this `Node`, unless overridden by
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] or a later
    /// [`fuchsia.sysmem2/Node.SetDebugClientInfo`].
    ///
    /// Sending [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`] once per
    /// `Allocator` is the most efficient way to ensure that all
    /// [`fuchsia.sysmem2/Node`](s) will have at least some debug client info
    /// set, and is also more efficient than separately sending the same debug
    /// client info via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] for each
    /// created [`fuchsia.sysmem2/Node`].
    ///
    /// Also used when verbose logging is enabled (see `SetVerboseLogging`) to
    /// indicate which client is closing their channel first, leading to subtree
    /// failure (which can be normal if the purpose of the subtree is over, but
    /// if happening earlier than expected, the client-channel-specific name can
    /// help diagnose where the failure is first coming from, from sysmem's
    /// point of view).
    ///
    /// All table fields are currently required.
    ///
    /// + request `name` This can be an arbitrary string, but the current
    ///   process name (see `fsl::GetCurrentProcessName`) is a good default.
    /// + request `id` This can be an arbitrary id, but the current process ID
    ///   (see `fsl::GetCurrentProcessKoid`) is a good default.
    SetDebugClientInfo { payload: NodeSetDebugClientInfoRequest, control_handle: NodeControlHandle },
    /// Sysmem logs a warning if sysmem hasn't seen
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from all clients
    /// within 5 seconds after creation of a new collection.
    ///
    /// Clients can call this method to change when the log is printed. If
    /// multiple clients set the deadline, it's unspecified which deadline will
    /// take effect.
    ///
    /// In most cases the default works well.
    ///
    /// All table fields are currently required.
    ///
    /// + request `deadline` The time at which sysmem will start trying to log
    ///   the warning, unless all constraints are with sysmem by then.
    SetDebugTimeoutLogDeadline {
        payload: NodeSetDebugTimeoutLogDeadlineRequest,
        control_handle: NodeControlHandle,
    },
    /// This enables verbose logging for the buffer collection.
    ///
    /// Verbose logging includes constraints set via
    /// [`fuchsia.sysmem2/BufferCollection.SetConstraints`] from each client
    /// along with info set via [`fuchsia.sysmem2/Node.SetDebugClientInfo`] (or
    /// [`fuchsia.sysmem2/Allocator.SetDebugClientInfo`]) and the structure of
    /// the tree of `Node`(s).
    ///
    /// Normally sysmem prints only a single line complaint when aggregation
    /// fails, with just the specific detailed reason that aggregation failed,
    /// with little surrounding context.  While this is often enough to diagnose
    /// a problem if only a small change was made and everything was working
    /// before the small change, it's often not particularly helpful for getting
    /// a new buffer collection to work for the first time.  Especially with
    /// more complex trees of nodes, involving things like
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`],
    /// [`fuchsia.sysmem2/BufferCollectionToken.SetDispensable`],
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`] nodes, and associated
    /// subtrees of nodes, verbose logging may help in diagnosing what the tree
    /// looks like and why it's failing a logical allocation, or why a tree or
    /// subtree is failing sooner than expected.
    ///
    /// The intent of the extra logging is to be acceptable from a performance
    /// point of view, under the assumption that verbose logging is only enabled
    /// on a low number of buffer collections. If we're not tracking down a bug,
    /// we shouldn't send this message.
    SetVerboseLogging { control_handle: NodeControlHandle },
    /// This gets a handle that can be used as a parameter to
    /// [`fuchsia.sysmem2/Node.IsAlternateFor`] called on any
    /// [`fuchsia.sysmem2/Node`]. This handle is only for use as proof that the
    /// client obtained this handle from this `Node`.
    ///
    /// Because this is a get not a set, no [`fuchsia.sysmem2/Node.Sync`] is
    /// needed between the `GetNodeRef` and the call to `IsAlternateFor`,
    /// despite the two calls typically being on different channels.
    ///
    /// See also [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// All table fields are currently required.
    ///
    /// - response `node_ref` This handle can be sent via `IsAlternateFor` on a
    ///   different `Node` channel, to prove that the client obtained the handle
    ///   from this `Node`.
    GetNodeRef { responder: NodeGetNodeRefResponder },
    /// Check whether the calling [`fuchsia.sysmem2/Node`] is in a subtree
    /// rooted at a different child token of a common parent
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`], in relation to the
    /// passed-in `node_ref`.
    ///
    /// This call is for assisting with admission control de-duplication, and
    /// with debugging.
    ///
    /// The `node_ref` must be obtained using
    /// [`fuchsia.sysmem2/Node.GetNodeRef`].
    ///
    /// The `node_ref` can be a duplicated handle; it's not necessary to call
    /// `GetNodeRef` for every call to [`fuchsia.sysmem2/Node.IsAlternateFor`].
    ///
    /// If a calling token may not actually be a valid token at all due to a
    /// potentially hostile/untrusted provider of the token, call
    /// [`fuchsia.sysmem2/Allocator.ValidateBufferCollectionToken`] first
    /// instead of potentially getting stuck indefinitely if `IsAlternateFor`
    /// never responds due to a calling token not being a real token (not really
    /// talking to sysmem).  Another option is to call
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] with this token first
    /// which also validates the token along with converting it to a
    /// [`fuchsia.sysmem2/BufferCollection`], then call `IsAlternateFor`.
    ///
    /// All table fields are currently required.
    ///
    /// - response `is_alternate`
    ///   - true: The first parent node in common between the calling node and
    ///     the `node_ref` `Node` is a `BufferCollectionTokenGroup`.  This means
    ///     that the calling `Node` and the `node_ref` `Node` will not have both
    ///     their constraints apply - rather sysmem will choose one or the other
    ///     of the constraints - never both.  This is because only one child of
    ///     a `BufferCollectionTokenGroup` is selected during logical
    ///     allocation, with only that one child's subtree contributing to
    ///     constraints aggregation.
    ///   - false: The first parent node in common between the calling `Node`
    ///     and the `node_ref` `Node` is not a `BufferCollectionTokenGroup`.
    ///     Currently, this means the first parent node in common is a
    ///     `BufferCollectionToken` or `BufferCollection` (regardless of not
    ///     `Release`ed).  This means that the calling `Node` and the `node_ref`
    ///     `Node` may have both their constraints apply during constraints
    ///     aggregation of the logical allocation, if both `Node`(s) are
    ///     selected by any parent `BufferCollectionTokenGroup`(s) involved. In
    ///     this case, there is no `BufferCollectionTokenGroup` that will
    ///     directly prevent the two `Node`(s) from both being selected and
    ///     their constraints both aggregated, but even when false, one or both
    ///     `Node`(s) may still be eliminated from consideration if one or both
    ///     `Node`(s) has a direct or indirect parent
    ///     `BufferCollectionTokenGroup` which selects a child subtree other
    ///     than the subtree containing the calling `Node` or `node_ref` `Node`.
    /// * error `[fuchsia.sysmem2/Error.NOT_FOUND]` The node_ref wasn't
    ///   associated with the same buffer collection as the calling `Node`.
    ///   Another reason for this error is if the `node_ref` is an
    ///   [`zx.Handle.EVENT`] handle with sufficient rights, but isn't actually
    ///   a real `node_ref` obtained from `GetNodeRef`.
    /// * error `[fuchsia.sysmem2/Error.PROTOCOL_DEVIATION]` The caller passed a
    ///   `node_ref` that isn't a [`zx.Handle:EVENT`] handle, or doesn't have
    ///   the needed rights expected on a real `node_ref`.
    /// * No other failing status codes are returned by this call.  However,
    ///   sysmem may add additional codes in future, so the client should have
    ///   sensible default handling for any failing status code.
    IsAlternateFor { payload: NodeIsAlternateForRequest, responder: NodeIsAlternateForResponder },
    /// Get the buffer collection ID. This ID is also available from
    /// [`fuchsia.sysmem2/Allocator.GetVmoInfo`] (along with the `buffer_index`
    /// within the collection).
    ///
    /// This call is mainly useful in situations where we can't convey a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] or
    /// [`fuchsia.sysmem2/BufferCollection`] directly, but can only convey a VMO
    /// handle, which can be joined back up with a `BufferCollection` client end
    /// that was created via a different path. Prefer to convey a
    /// `BufferCollectionToken` or `BufferCollection` directly when feasible.
    ///
    /// Trusting a `buffer_collection_id` value from a source other than sysmem
    /// is analogous to trusting a koid value from a source other than zircon.
    /// Both should be avoided unless really necessary, and both require
    /// caution. In some situations it may be reasonable to refer to a
    /// pre-established `BufferCollection` by `buffer_collection_id` via a
    /// protocol for efficiency reasons, but an incoming value purporting to be
    /// a `buffer_collection_id` is not sufficient alone to justify granting the
    /// sender of the `buffer_collection_id` any capability. The sender must
    /// first prove to a receiver that the sender has/had a VMO or has/had a
    /// `BufferCollectionToken` to the same collection by sending a handle that
    /// sysmem confirms is a valid sysmem handle and which sysmem maps to the
    /// `buffer_collection_id` value. The receiver should take care to avoid
    /// assuming that a sender had a `BufferCollectionToken` in cases where the
    /// sender has only proven that the sender had a VMO.
    ///
    /// - response `buffer_collection_id` This ID is unique per buffer
    ///   collection per boot. Each buffer is uniquely identified by the
    ///   `buffer_collection_id` and `buffer_index` together.
    GetBufferCollectionId { responder: NodeGetBufferCollectionIdResponder },
    /// Sets the current [`fuchsia.sysmem2/Node`] and all child `Node`(s)
    /// created after this message to weak, which means that a client's `Node`
    /// client end (or a child created after this message) is not alone
    /// sufficient to keep allocated VMOs alive.
    ///
    /// All VMOs obtained from weak `Node`(s) are weak sysmem VMOs. See also
    /// `close_weak_asap`.
    ///
    /// This message is only permitted before the `Node` becomes ready for
    /// allocation (else the server closes the channel with `ZX_ERR_BAD_STATE`):
    ///   * `BufferCollectionToken`: any time
    ///   * `BufferCollection`: before `SetConstraints`
    ///   * `BufferCollectionTokenGroup`: before `AllChildrenPresent`
    ///
    /// Currently, no conversion from strong `Node` to weak `Node` after ready
    /// for allocation is provided, but a client can simulate that by creating
    /// an additional `Node` before allocation and setting that additional
    /// `Node` to weak, and then potentially at some point later sending
    /// `Release` and closing the client end of the client's strong `Node`, but
    /// keeping the client's weak `Node`.
    ///
    /// Zero strong `Node`(s) and zero strong VMO handles will result in buffer
    /// collection failure (all `Node` client end(s) will see
    /// `ZX_CHANNEL_PEER_CLOSED` and all `close_weak_asap` `client_end`(s) will
    /// see `ZX_EVENTPAIR_PEER_CLOSED`), but sysmem (intentionally) won't notice
    /// this situation until all `Node`(s) are ready for allocation. For initial
    /// allocation to succeed, at least one strong `Node` is required to exist
    /// at allocation time, but after that client receives VMO handles, that
    /// client can `BufferCollection.Release` and close the client end without
    /// causing this type of failure.
    ///
    /// This implies [`fuchsia.sysmem2/Node.SetWeakOk`] as well, but does not
    /// imply `SetWeakOk` with `for_children_also` true, which can be sent
    /// separately as appropriate.
    SetWeak { control_handle: NodeControlHandle },
    /// This indicates to sysmem that the client is prepared to pay attention to
    /// `close_weak_asap`.
    ///
    /// If sent, this message must be before
    /// [`fuchsia.sysmem2/BufferCollection.WaitForAllBuffersAllocated`].
    ///
    /// All participants using a weak [`fuchsia.sysmem2/BufferCollection`] must
    /// send this message before `WaitForAllBuffersAllocated`, or a parent
    /// `Node` must have sent [`fuchsia.sysmem2/Node.SetWeakOk`] with
    /// `for_child_nodes_also` true, else the `WaitForAllBuffersAllocated` will
    /// trigger buffer collection failure.
    ///
    /// This message is necessary because weak sysmem VMOs have not always been
    /// a thing, so older clients are not aware of the need to pay attention to
    /// `close_weak_asap` `ZX_EVENTPAIR_PEER_CLOSED` and close all remaining
    /// sysmem weak VMO handles asap. By having this message and requiring
    /// participants to indicate their acceptance of this aspect of the overall
    /// protocol, we avoid situations where an older client is delivered a weak
    /// VMO without any way for sysmem to get that VMO to close quickly later
    /// (and on a per-buffer basis).
    ///
    /// A participant that doesn't handle `close_weak_asap` and also doesn't
    /// retrieve any VMO handles via `WaitForAllBuffersAllocated` doesn't need
    /// to send `SetWeakOk` (and doesn't need to have a parent `Node` send
    /// `SetWeakOk` with `for_child_nodes_also` true either). However, if that
    /// same participant has a child/delegate which does retrieve VMOs, that
    /// child/delegate will need to send `SetWeakOk` before
    /// `WaitForAllBuffersAllocated`.
    ///
    /// + request `for_child_nodes_also` If present and true, this means direct
    ///   child nodes of this node created after this message plus all
    ///   descendants of those nodes will behave as if `SetWeakOk` was sent on
    ///   those nodes. Any child node of this node that was created before this
    ///   message is not included. This setting is "sticky" in the sense that a
    ///   subsequent `SetWeakOk` without this bool set to true does not reset
    ///   the server-side bool. If this creates a problem for a participant, a
    ///   workaround is to `SetWeakOk` with `for_child_nodes_also` true on child
    ///   tokens instead, as appropriate. A participant should only set
    ///   `for_child_nodes_also` true if the participant can really promise to
    ///   obey `close_weak_asap` both for its own weak VMO handles, and for all
    ///   weak VMO handles held by participants holding the corresponding child
    ///   `Node`(s). When `for_child_nodes_also` is set, descendent `Node`(s)
    ///   which are using sysmem(1) can be weak, despite the clients of those
    ///   sysmem1 `Node`(s) not having any direct way to `SetWeakOk` or any
    ///   direct way to find out about `close_weak_asap`. This only applies to
    ///   descendents of this `Node` which are using sysmem(1), not to this
    ///   `Node` when converted directly from a sysmem2 token to a sysmem(1)
    ///   token, which will fail allocation unless an ancestor of this `Node`
    ///   specified `for_child_nodes_also` true.
    SetWeakOk { payload: NodeSetWeakOkRequest, control_handle: NodeControlHandle },
    /// The server_end will be closed after this `Node` and any child nodes
    /// have released their buffer counts, making those counts available for
    /// reservation by a different `Node` via
    /// [`fuchsia.sysmem2/BufferCollection.AttachToken`].
    ///
    /// The `Node` buffer counts may not be released until the entire tree of
    /// `Node`(s) is closed or failed, because
    /// [`fuchsia.sysmem2/BufferCollection.Release`] followed by channel close
    /// does not immediately un-reserve the `Node` buffer counts. Instead, the
    /// `Node` buffer counts remain reserved until the orphaned node is later
    /// cleaned up.
    ///
    /// If the `Node` exceeds a fairly large number of attached eventpair server
    /// ends, a log message will indicate this and the `Node` (and the
    /// appropriate) sub-tree will fail.
    ///
    /// The `server_end` will remain open when
    /// [`fuchsia.sysmem2/Allocator.BindSharedCollection`] converts a
    /// [`fuchsia.sysmem2/BufferCollectionToken`] into a
    /// [`fuchsia.sysmem2/BufferCollection`].
    ///
    /// This message can also be used with a
    /// [`fuchsia.sysmem2/BufferCollectionTokenGroup`].
    AttachNodeTracking { payload: NodeAttachNodeTrackingRequest, control_handle: NodeControlHandle },
    /// An interaction was received which does not match any known method.
    #[non_exhaustive]
    _UnknownMethod {
        /// Ordinal of the method that was called.
        ordinal: u64,
        control_handle: NodeControlHandle,
        method_type: fidl::MethodType,
    },
}
14279
14280impl NodeRequest {
14281    #[allow(irrefutable_let_patterns)]
14282    pub fn into_sync(self) -> Option<(NodeSyncResponder)> {
14283        if let NodeRequest::Sync { responder } = self {
14284            Some((responder))
14285        } else {
14286            None
14287        }
14288    }
14289
14290    #[allow(irrefutable_let_patterns)]
14291    pub fn into_release(self) -> Option<(NodeControlHandle)> {
14292        if let NodeRequest::Release { control_handle } = self {
14293            Some((control_handle))
14294        } else {
14295            None
14296        }
14297    }
14298
14299    #[allow(irrefutable_let_patterns)]
14300    pub fn into_set_name(self) -> Option<(NodeSetNameRequest, NodeControlHandle)> {
14301        if let NodeRequest::SetName { payload, control_handle } = self {
14302            Some((payload, control_handle))
14303        } else {
14304            None
14305        }
14306    }
14307
14308    #[allow(irrefutable_let_patterns)]
14309    pub fn into_set_debug_client_info(
14310        self,
14311    ) -> Option<(NodeSetDebugClientInfoRequest, NodeControlHandle)> {
14312        if let NodeRequest::SetDebugClientInfo { payload, control_handle } = self {
14313            Some((payload, control_handle))
14314        } else {
14315            None
14316        }
14317    }
14318
14319    #[allow(irrefutable_let_patterns)]
14320    pub fn into_set_debug_timeout_log_deadline(
14321        self,
14322    ) -> Option<(NodeSetDebugTimeoutLogDeadlineRequest, NodeControlHandle)> {
14323        if let NodeRequest::SetDebugTimeoutLogDeadline { payload, control_handle } = self {
14324            Some((payload, control_handle))
14325        } else {
14326            None
14327        }
14328    }
14329
14330    #[allow(irrefutable_let_patterns)]
14331    pub fn into_set_verbose_logging(self) -> Option<(NodeControlHandle)> {
14332        if let NodeRequest::SetVerboseLogging { control_handle } = self {
14333            Some((control_handle))
14334        } else {
14335            None
14336        }
14337    }
14338
14339    #[allow(irrefutable_let_patterns)]
14340    pub fn into_get_node_ref(self) -> Option<(NodeGetNodeRefResponder)> {
14341        if let NodeRequest::GetNodeRef { responder } = self {
14342            Some((responder))
14343        } else {
14344            None
14345        }
14346    }
14347
14348    #[allow(irrefutable_let_patterns)]
14349    pub fn into_is_alternate_for(
14350        self,
14351    ) -> Option<(NodeIsAlternateForRequest, NodeIsAlternateForResponder)> {
14352        if let NodeRequest::IsAlternateFor { payload, responder } = self {
14353            Some((payload, responder))
14354        } else {
14355            None
14356        }
14357    }
14358
14359    #[allow(irrefutable_let_patterns)]
14360    pub fn into_get_buffer_collection_id(self) -> Option<(NodeGetBufferCollectionIdResponder)> {
14361        if let NodeRequest::GetBufferCollectionId { responder } = self {
14362            Some((responder))
14363        } else {
14364            None
14365        }
14366    }
14367
14368    #[allow(irrefutable_let_patterns)]
14369    pub fn into_set_weak(self) -> Option<(NodeControlHandle)> {
14370        if let NodeRequest::SetWeak { control_handle } = self {
14371            Some((control_handle))
14372        } else {
14373            None
14374        }
14375    }
14376
14377    #[allow(irrefutable_let_patterns)]
14378    pub fn into_set_weak_ok(self) -> Option<(NodeSetWeakOkRequest, NodeControlHandle)> {
14379        if let NodeRequest::SetWeakOk { payload, control_handle } = self {
14380            Some((payload, control_handle))
14381        } else {
14382            None
14383        }
14384    }
14385
14386    #[allow(irrefutable_let_patterns)]
14387    pub fn into_attach_node_tracking(
14388        self,
14389    ) -> Option<(NodeAttachNodeTrackingRequest, NodeControlHandle)> {
14390        if let NodeRequest::AttachNodeTracking { payload, control_handle } = self {
14391            Some((payload, control_handle))
14392        } else {
14393            None
14394        }
14395    }
14396
14397    /// Name of the method defined in FIDL
14398    pub fn method_name(&self) -> &'static str {
14399        match *self {
14400            NodeRequest::Sync { .. } => "sync",
14401            NodeRequest::Release { .. } => "release",
14402            NodeRequest::SetName { .. } => "set_name",
14403            NodeRequest::SetDebugClientInfo { .. } => "set_debug_client_info",
14404            NodeRequest::SetDebugTimeoutLogDeadline { .. } => "set_debug_timeout_log_deadline",
14405            NodeRequest::SetVerboseLogging { .. } => "set_verbose_logging",
14406            NodeRequest::GetNodeRef { .. } => "get_node_ref",
14407            NodeRequest::IsAlternateFor { .. } => "is_alternate_for",
14408            NodeRequest::GetBufferCollectionId { .. } => "get_buffer_collection_id",
14409            NodeRequest::SetWeak { .. } => "set_weak",
14410            NodeRequest::SetWeakOk { .. } => "set_weak_ok",
14411            NodeRequest::AttachNodeTracking { .. } => "attach_node_tracking",
14412            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
14413                "unknown one-way method"
14414            }
14415            NodeRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
14416                "unknown two-way method"
14417            }
14418        }
14419    }
14420}
14421
/// Control handle for a served `Node` connection. Clones share the same
/// underlying channel state.
#[derive(Debug, Clone)]
pub struct NodeControlHandle {
    // Shared serve state; all clones and responders reference the same channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
14426
impl fidl::endpoints::ControlHandle for NodeControlHandle {
    // All methods delegate to the shared serve state / underlying channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Signaling the peer requires a real zircon channel, so this is
    // Fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
14452
// No inherent methods; `Node` has no events, so everything is provided by the
// `ControlHandle` trait implementation above.
impl NodeControlHandle {}
14454
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeSyncResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this responder's Drop (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back to the client in the response.
    tx_id: u32,
}
14461
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeSyncResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14472
impl fidl::endpoints::Responder for NodeSyncResponder {
    type ControlHandle = NodeControlHandle;

    /// Returns the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a reply and without shutting
    /// down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14487
14488impl NodeSyncResponder {
14489    /// Sends a response to the FIDL transaction.
14490    ///
14491    /// Sets the channel to shutdown if an error occurs.
14492    pub fn send(self) -> Result<(), fidl::Error> {
14493        let _result = self.send_raw();
14494        if _result.is_err() {
14495            self.control_handle.shutdown();
14496        }
14497        self.drop_without_shutdown();
14498        _result
14499    }
14500
14501    /// Similar to "send" but does not shutdown the channel if an error occurs.
14502    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
14503        let _result = self.send_raw();
14504        self.drop_without_shutdown();
14505        _result
14506    }
14507
14508    fn send_raw(&self) -> Result<(), fidl::Error> {
14509        self.control_handle.inner.send::<fidl::encoding::FlexibleType<fidl::encoding::EmptyStruct>>(
14510            fidl::encoding::Flexible::new(()),
14511            self.tx_id,
14512            0x11ac2555cf575b54,
14513            fidl::encoding::DynamicFlags::FLEXIBLE,
14514        )
14515    }
14516}
14517
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetNodeRefResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this responder's Drop (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back to the client in the response.
    tx_id: u32,
}
14524
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetNodeRefResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14535
impl fidl::endpoints::Responder for NodeGetNodeRefResponder {
    type ControlHandle = NodeControlHandle;

    /// Returns the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a reply and without shutting
    /// down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14550
14551impl NodeGetNodeRefResponder {
14552    /// Sends a response to the FIDL transaction.
14553    ///
14554    /// Sets the channel to shutdown if an error occurs.
14555    pub fn send(self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14556        let _result = self.send_raw(payload);
14557        if _result.is_err() {
14558            self.control_handle.shutdown();
14559        }
14560        self.drop_without_shutdown();
14561        _result
14562    }
14563
14564    /// Similar to "send" but does not shutdown the channel if an error occurs.
14565    pub fn send_no_shutdown_on_err(
14566        self,
14567        mut payload: NodeGetNodeRefResponse,
14568    ) -> Result<(), fidl::Error> {
14569        let _result = self.send_raw(payload);
14570        self.drop_without_shutdown();
14571        _result
14572    }
14573
14574    fn send_raw(&self, mut payload: NodeGetNodeRefResponse) -> Result<(), fidl::Error> {
14575        self.control_handle.inner.send::<fidl::encoding::FlexibleType<NodeGetNodeRefResponse>>(
14576            fidl::encoding::Flexible::new(&mut payload),
14577            self.tx_id,
14578            0x5b3d0e51614df053,
14579            fidl::encoding::DynamicFlags::FLEXIBLE,
14580        )
14581    }
14582}
14583
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeIsAlternateForResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this responder's Drop (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back to the client in the response.
    tx_id: u32,
}
14590
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeIsAlternateForResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14601
impl fidl::endpoints::Responder for NodeIsAlternateForResponder {
    type ControlHandle = NodeControlHandle;

    /// Returns the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a reply and without shutting
    /// down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14616
14617impl NodeIsAlternateForResponder {
14618    /// Sends a response to the FIDL transaction.
14619    ///
14620    /// Sets the channel to shutdown if an error occurs.
14621    pub fn send(
14622        self,
14623        mut result: Result<&NodeIsAlternateForResponse, Error>,
14624    ) -> Result<(), fidl::Error> {
14625        let _result = self.send_raw(result);
14626        if _result.is_err() {
14627            self.control_handle.shutdown();
14628        }
14629        self.drop_without_shutdown();
14630        _result
14631    }
14632
14633    /// Similar to "send" but does not shutdown the channel if an error occurs.
14634    pub fn send_no_shutdown_on_err(
14635        self,
14636        mut result: Result<&NodeIsAlternateForResponse, Error>,
14637    ) -> Result<(), fidl::Error> {
14638        let _result = self.send_raw(result);
14639        self.drop_without_shutdown();
14640        _result
14641    }
14642
14643    fn send_raw(
14644        &self,
14645        mut result: Result<&NodeIsAlternateForResponse, Error>,
14646    ) -> Result<(), fidl::Error> {
14647        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
14648            NodeIsAlternateForResponse,
14649            Error,
14650        >>(
14651            fidl::encoding::FlexibleResult::new(result),
14652            self.tx_id,
14653            0x3a58e00157e0825,
14654            fidl::encoding::DynamicFlags::FLEXIBLE,
14655        )
14656    }
14657}
14658
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct NodeGetBufferCollectionIdResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running this responder's Drop (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<NodeControlHandle>,
    // Transaction id echoed back to the client in the response.
    tx_id: u32,
}
14665
/// Sets the channel to be shutdown (see [`NodeControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for NodeGetBufferCollectionIdResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
14676
impl fidl::endpoints::Responder for NodeGetBufferCollectionIdResponder {
    type ControlHandle = NodeControlHandle;

    /// Returns the control handle for the channel this responder replies on.
    fn control_handle(&self) -> &NodeControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without sending a reply and without shutting
    /// down the channel.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
14691
14692impl NodeGetBufferCollectionIdResponder {
14693    /// Sends a response to the FIDL transaction.
14694    ///
14695    /// Sets the channel to shutdown if an error occurs.
14696    pub fn send(self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14697        let _result = self.send_raw(payload);
14698        if _result.is_err() {
14699            self.control_handle.shutdown();
14700        }
14701        self.drop_without_shutdown();
14702        _result
14703    }
14704
14705    /// Similar to "send" but does not shutdown the channel if an error occurs.
14706    pub fn send_no_shutdown_on_err(
14707        self,
14708        mut payload: &NodeGetBufferCollectionIdResponse,
14709    ) -> Result<(), fidl::Error> {
14710        let _result = self.send_raw(payload);
14711        self.drop_without_shutdown();
14712        _result
14713    }
14714
14715    fn send_raw(&self, mut payload: &NodeGetBufferCollectionIdResponse) -> Result<(), fidl::Error> {
14716        self.control_handle
14717            .inner
14718            .send::<fidl::encoding::FlexibleType<NodeGetBufferCollectionIdResponse>>(
14719                fidl::encoding::Flexible::new(payload),
14720                self.tx_id,
14721                0x77d19a494b78ba8c,
14722                fidl::encoding::DynamicFlags::FLEXIBLE,
14723            )
14724    }
14725}
14726
/// Marker type identifying the `SecureMem` FIDL protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SecureMemMarker;
14729
impl fidl::endpoints::ProtocolMarker for SecureMemMarker {
    type Proxy = SecureMemProxy;
    type RequestStream = SecureMemRequestStream;
    // Synchronous proxies require a real zircon channel, so Fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = SecureMemSynchronousProxy;

    // "(anonymous)" — presumably the protocol has no discoverable name; used
    // only in debug/error messages. TODO(review): confirm against fidlgen.
    const DEBUG_NAME: &'static str = "(anonymous) SecureMem";
}
// Result type aliases for the SecureMem protocol's two-way methods; each pairs
// the method's response payload (or unit) with the domain `Error` type.
pub type SecureMemGetPhysicalSecureHeapsResult =
    Result<SecureMemGetPhysicalSecureHeapsResponse, Error>;
pub type SecureMemGetDynamicSecureHeapsResult =
    Result<SecureMemGetDynamicSecureHeapsResponse, Error>;
pub type SecureMemGetPhysicalSecureHeapPropertiesResult =
    Result<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>;
pub type SecureMemAddSecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemDeleteSecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemModifySecureHeapPhysicalRangeResult = Result<(), Error>;
pub type SecureMemZeroSubRangeResult = Result<(), Error>;
14748
/// Client interface for the `SecureMem` protocol: one associated future type
/// plus one method per two-way FIDL method. Implemented by the generated
/// async proxy.
pub trait SecureMemProxyInterface: Send + Sync {
    type GetPhysicalSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut;
    type GetDynamicSecureHeapsResponseFut: std::future::Future<Output = Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error>>
        + Send;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut;
    type GetPhysicalSecureHeapPropertiesResponseFut: std::future::Future<
            Output = Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error>,
        > + Send;
    fn r#get_physical_secure_heap_properties(
        &self,
        payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut;
    type AddSecureHeapPhysicalRangeResponseFut: std::future::Future<Output = Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error>>
        + Send;
    fn r#add_secure_heap_physical_range(
        &self,
        payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut;
    type DeleteSecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#delete_secure_heap_physical_range(
        &self,
        payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut;
    type ModifySecureHeapPhysicalRangeResponseFut: std::future::Future<
            Output = Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error>,
        > + Send;
    fn r#modify_secure_heap_physical_range(
        &self,
        payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut;
    type ZeroSubRangeResponseFut: std::future::Future<Output = Result<SecureMemZeroSubRangeResult, fidl::Error>>
        + Send;
    fn r#zero_sub_range(
        &self,
        payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut;
}
/// Synchronous (blocking) client for the `SecureMem` protocol. Fuchsia-only
/// because it drives a real zircon channel.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct SecureMemSynchronousProxy {
    client: fidl::client::sync::Client,
}
14795
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for SecureMemSynchronousProxy {
    type Proxy = SecureMemProxy;
    type Protocol = SecureMemMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Consumes the proxy, returning the underlying channel.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
14813
14814#[cfg(target_os = "fuchsia")]
14815impl SecureMemSynchronousProxy {
14816    pub fn new(channel: fidl::Channel) -> Self {
14817        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
14818        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
14819    }
14820
14821    pub fn into_channel(self) -> fidl::Channel {
14822        self.client.into_channel()
14823    }
14824
14825    /// Waits until an event arrives and returns it. It is safe for other
14826    /// threads to make concurrent requests while waiting for an event.
14827    pub fn wait_for_event(
14828        &self,
14829        deadline: zx::MonotonicInstant,
14830    ) -> Result<SecureMemEvent, fidl::Error> {
14831        SecureMemEvent::decode(self.client.wait_for_event(deadline)?)
14832    }
14833
14834    /// Gets the physical address and length of any secure heap whose physical
14835    /// range is configured via the TEE.
14836    ///
14837    /// Presently, these will be fixed physical addresses and lengths, with the
14838    /// location plumbed via the TEE.
14839    ///
14840    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
14841    /// when there isn't any special heap-specific per-VMO setup or teardown
14842    /// required.
14843    ///
14844    /// The physical range must be secured/protected by the TEE before the
14845    /// securemem driver responds to this request with success.
14846    ///
14847    /// Sysmem should only call this once.  Returning zero heaps is not a
14848    /// failure.
14849    ///
14850    /// Errors:
14851    ///  * PROTOCOL_DEVIATION - called more than once.
14852    ///  * UNSPECIFIED - generic internal error (such as in communication
14853    ///    with TEE which doesn't generate zx_status_t errors).
14854    ///  * other errors are allowed; any other errors should be treated the same
14855    ///    as UNSPECIFIED.
14856    pub fn r#get_physical_secure_heaps(
14857        &self,
14858        ___deadline: zx::MonotonicInstant,
14859    ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
14860        let _response = self.client.send_query::<
14861            fidl::encoding::EmptyPayload,
14862            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
14863        >(
14864            (),
14865            0x38716300592073e3,
14866            fidl::encoding::DynamicFlags::FLEXIBLE,
14867            ___deadline,
14868        )?
14869        .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
14870        Ok(_response.map(|x| x))
14871    }
14872
14873    /// Gets information about any secure heaps whose physical pages are not
14874    /// configured by the TEE, but by sysmem.
14875    ///
14876    /// Sysmem should only call this once. Returning zero heaps is not a
14877    /// failure.
14878    ///
14879    /// Errors:
14880    ///  * PROTOCOL_DEVIATION - called more than once.
14881    ///  * UNSPECIFIED - generic internal error (such as in communication
14882    ///    with TEE which doesn't generate zx_status_t errors).
14883    ///  * other errors are allowed; any other errors should be treated the same
14884    ///    as UNSPECIFIED.
14885    pub fn r#get_dynamic_secure_heaps(
14886        &self,
14887        ___deadline: zx::MonotonicInstant,
14888    ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
14889        let _response = self.client.send_query::<
14890            fidl::encoding::EmptyPayload,
14891            fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
14892        >(
14893            (),
14894            0x1190847f99952834,
14895            fidl::encoding::DynamicFlags::FLEXIBLE,
14896            ___deadline,
14897        )?
14898        .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
14899        Ok(_response.map(|x| x))
14900    }
14901
14902    /// This request from sysmem to the securemem driver gets the properties of
14903    /// a protected/secure heap.
14904    ///
14905    /// This only handles heaps with a single contiguous physical extent.
14906    ///
14907    /// The heap's entire physical range is indicated in case this request needs
14908    /// some physical space to auto-detect how many ranges are REE-usable.  Any
14909    /// temporary HW protection ranges will be deleted before this request
14910    /// completes.
14911    ///
14912    /// Errors:
14913    ///  * UNSPECIFIED - generic internal error (such as in communication
14914    ///    with TEE which doesn't generate zx_status_t errors).
14915    ///  * other errors are allowed; any other errors should be treated the same
14916    ///    as UNSPECIFIED.
14917    pub fn r#get_physical_secure_heap_properties(
14918        &self,
14919        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
14920        ___deadline: zx::MonotonicInstant,
14921    ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
14922        let _response = self.client.send_query::<
14923            SecureMemGetPhysicalSecureHeapPropertiesRequest,
14924            fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
14925        >(
14926            payload,
14927            0xc6f06889009c7bc,
14928            fidl::encoding::DynamicFlags::FLEXIBLE,
14929            ___deadline,
14930        )?
14931        .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
14932        Ok(_response.map(|x| x))
14933    }
14934
14935    /// This request from sysmem to the securemem driver conveys a physical
14936    /// range to add, for a heap whose physical range(s) are set up via
14937    /// sysmem.
14938    ///
14939    /// Only sysmem can call this because only sysmem is handed the client end
14940    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
14941    /// securemem driver is the server end of this protocol.
14942    ///
14943    /// The securemem driver must configure all the covered offsets as protected
14944    /// before responding to this message with success.
14945    ///
14946    /// On failure, the securemem driver must ensure the protected range was not
14947    /// created.
14948    ///
14949    /// Sysmem must only call this up to once if dynamic_protection_ranges
14950    /// false.
14951    ///
14952    /// If dynamic_protection_ranges is true, sysmem can call this multiple
14953    /// times as long as the current number of ranges never exceeds
14954    /// max_protected_range_count.
14955    ///
14956    /// The caller must not attempt to add a range that matches an
14957    /// already-existing range.  Added ranges can overlap each other as long as
14958    /// no two ranges match exactly.
14959    ///
14960    /// Errors:
14961    ///   * PROTOCOL_DEVIATION - called more than once when
14962    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
14963    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
14964    ///     range that doesn't conform to protected_range_granularity. See log.
14965    ///   * UNSPECIFIED - generic internal error (such as in communication
14966    ///     with TEE which doesn't generate zx_status_t errors).
14967    ///   * other errors are possible, such as from communication failures or
14968    ///     server propagation of failures.
14969    pub fn r#add_secure_heap_physical_range(
14970        &self,
14971        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
14972        ___deadline: zx::MonotonicInstant,
14973    ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
14974        let _response = self.client.send_query::<
14975            SecureMemAddSecureHeapPhysicalRangeRequest,
14976            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
14977        >(
14978            payload,
14979            0x35f695b9b6c7217a,
14980            fidl::encoding::DynamicFlags::FLEXIBLE,
14981            ___deadline,
14982        )?
14983        .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
14984        Ok(_response.map(|x| x))
14985    }
14986
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
        // Blocking two-way call; the hex literal is the method's FIDL ordinal.
        let _response = self.client.send_query::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        // Unwrap the FLEXIBLE envelope; transport-level problems surface here.
        .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
        // Success payload is an empty struct, so the map is an identity.
        Ok(_response.map(|x| x))
    }
15042
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's geting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
        // Blocking two-way call; the hex literal is the method's FIDL ordinal.
        let _response = self.client.send_query::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        // Unwrap the FLEXIBLE envelope; transport-level problems surface here.
        .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
        // Success payload is an empty struct, so the map is an identity.
        Ok(_response.map(|x| x))
    }
15101
    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since.  When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
        // Blocking two-way call; the hex literal is the method's FIDL ordinal.
        let _response = self.client.send_query::<
            SecureMemZeroSubRangeRequest,
            fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
        >(
            payload,
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            ___deadline,
        )?
        // Unwrap the FLEXIBLE envelope; transport-level problems surface here.
        .into_result::<SecureMemMarker>("zero_sub_range")?;
        // Success payload is an empty struct, so the map is an identity.
        Ok(_response.map(|x| x))
    }
15143}
15144
#[cfg(target_os = "fuchsia")]
impl From<SecureMemSynchronousProxy> for zx::Handle {
    /// Consumes the proxy and yields its underlying channel as a generic
    /// handle.
    fn from(value: SecureMemSynchronousProxy) -> Self {
        let channel = value.into_channel();
        channel.into()
    }
}
15151
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for SecureMemSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(value: fidl::Channel) -> Self {
        SecureMemSynchronousProxy::new(value)
    }
}
15158
/// Asynchronous client proxy for the `fuchsia.sysmem2/SecureMem` protocol.
#[derive(Debug, Clone)]
pub struct SecureMemProxy {
    // Underlying FIDL client used to send requests and decode responses.
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
15163
15164impl fidl::endpoints::Proxy for SecureMemProxy {
15165    type Protocol = SecureMemMarker;
15166
15167    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
15168        Self::new(inner)
15169    }
15170
15171    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
15172        self.client.into_channel().map_err(|client| Self { client })
15173    }
15174
15175    fn as_channel(&self) -> &::fidl::AsyncChannel {
15176        self.client.as_channel()
15177    }
15178}
15179
// Inherent methods on the async proxy. Each protocol method here is a thin
// shim that forwards to the `SecureMemProxyInterface` implementation, which
// performs the actual wire encode/decode.
impl SecureMemProxy {
    /// Create a new Proxy for fuchsia.sysmem2/SecureMem.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        // DEBUG_NAME is used to label the client in error messages/logs.
        let protocol_name = <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> SecureMemEventStream {
        SecureMemEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once.  Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#get_physical_secure_heaps(self)
    }

    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_dynamic_secure_heaps(
        &self,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#get_dynamic_secure_heaps(self)
    }

    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable.  Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    pub fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#get_physical_secure_heap_properties(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range.  Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#add_secure_heap_physical_range(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#delete_secure_heap_physical_range(self, payload)
    }

    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's geting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#modify_secure_heap_physical_range(self, payload)
    }

    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since.  When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    pub fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    > {
        SecureMemProxyInterface::r#zero_sub_range(self, payload)
    }
}
15450
// Wire-level implementation of the async client. Every method follows the
// same pattern: a local `_decode` function interprets the response buffer
// (checking the method's FIDL ordinal and unwrapping the FLEXIBLE result
// envelope), and `send_query_and_decode` sends the request and returns a
// future that resolves via that decoder. The ordinal passed to the decoder
// must match the one passed to the send call.
impl SecureMemProxyInterface for SecureMemProxy {
    type GetPhysicalSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heaps(&self) -> Self::GetPhysicalSecureHeapsResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetPhysicalSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x38716300592073e3,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heaps")?;
            Ok(_response.map(|x| x))
        }
        // Request body is empty, hence the EmptyPayload type and `()` value.
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetPhysicalSecureHeapsResult,
        >(
            (),
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetDynamicSecureHeapsResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetDynamicSecureHeapsResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_dynamic_secure_heaps(&self) -> Self::GetDynamicSecureHeapsResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetDynamicSecureHeapsResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<SecureMemGetDynamicSecureHeapsResponse, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x1190847f99952834,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_dynamic_secure_heaps")?;
            Ok(_response.map(|x| x))
        }
        // Request body is empty, hence the EmptyPayload type and `()` value.
        self.client.send_query_and_decode::<
            fidl::encoding::EmptyPayload,
            SecureMemGetDynamicSecureHeapsResult,
        >(
            (),
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type GetPhysicalSecureHeapPropertiesResponseFut = fidl::client::QueryResponseFut<
        SecureMemGetPhysicalSecureHeapPropertiesResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#get_physical_secure_heap_properties(
        &self,
        mut payload: &SecureMemGetPhysicalSecureHeapPropertiesRequest,
    ) -> Self::GetPhysicalSecureHeapPropertiesResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemGetPhysicalSecureHeapPropertiesResult, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<
                    SecureMemGetPhysicalSecureHeapPropertiesResponse,
                    Error,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xc6f06889009c7bc,
            >(_buf?)?
            .into_result::<SecureMemMarker>("get_physical_secure_heap_properties")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemGetPhysicalSecureHeapPropertiesRequest,
            SecureMemGetPhysicalSecureHeapPropertiesResult,
        >(
            payload,
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type AddSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemAddSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#add_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemAddSecureHeapPhysicalRangeRequest,
    ) -> Self::AddSecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemAddSecureHeapPhysicalRangeResult, fidl::Error> {
            // Success variant carries no data, so it decodes as EmptyStruct.
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x35f695b9b6c7217a,
            >(_buf?)?
            .into_result::<SecureMemMarker>("add_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemAddSecureHeapPhysicalRangeRequest,
            SecureMemAddSecureHeapPhysicalRangeResult,
        >(
            payload,
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type DeleteSecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemDeleteSecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#delete_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemDeleteSecureHeapPhysicalRangeRequest,
    ) -> Self::DeleteSecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemDeleteSecureHeapPhysicalRangeResult, fidl::Error> {
            // Success variant carries no data, so it decodes as EmptyStruct.
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0xeaa58c650264c9e,
            >(_buf?)?
            .into_result::<SecureMemMarker>("delete_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemDeleteSecureHeapPhysicalRangeRequest,
            SecureMemDeleteSecureHeapPhysicalRangeResult,
        >(
            payload,
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ModifySecureHeapPhysicalRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemModifySecureHeapPhysicalRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#modify_secure_heap_physical_range(
        &self,
        mut payload: &SecureMemModifySecureHeapPhysicalRangeRequest,
    ) -> Self::ModifySecureHeapPhysicalRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemModifySecureHeapPhysicalRangeResult, fidl::Error> {
            // Success variant carries no data, so it decodes as EmptyStruct.
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x60b7448aa1187734,
            >(_buf?)?
            .into_result::<SecureMemMarker>("modify_secure_heap_physical_range")?;
            Ok(_response.map(|x| x))
        }
        self.client.send_query_and_decode::<
            SecureMemModifySecureHeapPhysicalRangeRequest,
            SecureMemModifySecureHeapPhysicalRangeResult,
        >(
            payload,
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
            _decode,
        )
    }

    type ZeroSubRangeResponseFut = fidl::client::QueryResponseFut<
        SecureMemZeroSubRangeResult,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#zero_sub_range(
        &self,
        mut payload: &SecureMemZeroSubRangeRequest,
    ) -> Self::ZeroSubRangeResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<SecureMemZeroSubRangeResult, fidl::Error> {
            // Success variant carries no data, so it decodes as EmptyStruct.
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::FlexibleResultType<fidl::encoding::EmptyStruct, Error>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x5b25b7901a385ce5,
            >(_buf?)?
            .into_result::<SecureMemMarker>("zero_sub_range")?;
            Ok(_response.map(|x| x))
        }
        self.client
            .send_query_and_decode::<SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResult>(
                payload,
                0x5b25b7901a385ce5,
                fidl::encoding::DynamicFlags::FLEXIBLE,
                _decode,
            )
    }
}
15657
/// Stream of decoded events arriving on a `SecureMem` client channel.
pub struct SecureMemEventStream {
    // Underlying receiver yielding raw message buffers from the channel.
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for SecureMemEventStream {}

impl futures::stream::FusedStream for SecureMemEventStream {
    fn is_terminated(&self) -> bool {
        // Mirrors the underlying receiver's terminated state.
        self.event_receiver.is_terminated()
    }
}
15669
15670impl futures::Stream for SecureMemEventStream {
15671    type Item = Result<SecureMemEvent, fidl::Error>;
15672
15673    fn poll_next(
15674        mut self: std::pin::Pin<&mut Self>,
15675        cx: &mut std::task::Context<'_>,
15676    ) -> std::task::Poll<Option<Self::Item>> {
15677        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
15678            &mut self.event_receiver,
15679            cx
15680        )?) {
15681            Some(buf) => std::task::Poll::Ready(Some(SecureMemEvent::decode(buf))),
15682            None => std::task::Poll::Ready(None),
15683        }
15684    }
15685}
15686
/// An event delivered by the fuchsia.sysmem2/SecureMem protocol.
///
/// No events are recognized by this binding, so the only possible value is
/// the catch-all for flexible events sent by an unknown (newer) peer.
#[derive(Debug)]
pub enum SecureMemEvent {
    /// A flexible event with an ordinal this binding does not recognize.
    #[non_exhaustive]
    _UnknownEvent {
        /// Ordinal of the event that was sent.
        ordinal: u64,
    },
}
15695
15696impl SecureMemEvent {
15697    /// Decodes a message buffer as a [`SecureMemEvent`].
15698    fn decode(
15699        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
15700    ) -> Result<SecureMemEvent, fidl::Error> {
15701        let (bytes, _handles) = buf.split_mut();
15702        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
15703        debug_assert_eq!(tx_header.tx_id, 0);
15704        match tx_header.ordinal {
15705            _ if tx_header.dynamic_flags().contains(fidl::encoding::DynamicFlags::FLEXIBLE) => {
15706                Ok(SecureMemEvent::_UnknownEvent { ordinal: tx_header.ordinal })
15707            }
15708            _ => Err(fidl::Error::UnknownOrdinal {
15709                ordinal: tx_header.ordinal,
15710                protocol_name: <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
15711            }),
15712        }
15713    }
15714}
15715
/// A Stream of incoming requests for fuchsia.sysmem2/SecureMem.
pub struct SecureMemRequestStream {
    // Shared server state (channel plus shutdown bookkeeping), also referenced
    // by every SecureMemControlHandle cloned from this stream.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // True once the stream has yielded `None`; polling after that panics.
    is_terminated: bool,
}

impl std::marker::Unpin for SecureMemRequestStream {}

impl futures::stream::FusedStream for SecureMemRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
15729
impl fidl::endpoints::RequestStream for SecureMemRequestStream {
    type Protocol = SecureMemMarker;
    type ControlHandle = SecureMemControlHandle;

    /// Wraps an async channel in a new, non-terminated request stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    /// Returns a control handle that shares this stream's server state.
    fn control_handle(&self) -> Self::ControlHandle {
        SecureMemControlHandle { inner: self.inner.clone() }
    }

    /// Decomposes the stream into its shared server state and termination flag.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    /// Reassembles a stream previously decomposed by `into_inner`.
    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
15756
impl futures::Stream for SecureMemRequestStream {
    type Item = Result<SecureMemRequest, fidl::Error>;

    /// Reads the next message off the channel and decodes it into a
    /// [`SecureMemRequest`], dispatching on the method ordinal.
    ///
    /// Yields `None` on peer closure or server shutdown; panics if polled
    /// again after completion.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A shutdown requested via a control handle ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled SecureMemRequestStream after completion");
        }
        // Borrow thread-local scratch buffers for the read + decode.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure is the normal end of stream, not an error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch on the method ordinal; each recognized arm decodes
                // the request payload and builds a responder holding the tx_id.
                std::task::Poll::Ready(Some(match header.ordinal {
                    // GetPhysicalSecureHeaps (empty request payload).
                    0x38716300592073e3 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeaps {
                            responder: SecureMemGetPhysicalSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetDynamicSecureHeaps (empty request payload).
                    0x1190847f99952834 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetDynamicSecureHeaps {
                            responder: SecureMemGetDynamicSecureHeapsResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // GetPhysicalSecureHeapProperties.
                    0xc6f06889009c7bc => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemGetPhysicalSecureHeapPropertiesRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemGetPhysicalSecureHeapPropertiesRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::GetPhysicalSecureHeapProperties {
                            payload: req,
                            responder: SecureMemGetPhysicalSecureHeapPropertiesResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // AddSecureHeapPhysicalRange.
                    0x35f695b9b6c7217a => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemAddSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemAddSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::AddSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemAddSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // DeleteSecureHeapPhysicalRange.
                    0xeaa58c650264c9e => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemDeleteSecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemDeleteSecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::DeleteSecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemDeleteSecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ModifySecureHeapPhysicalRange.
                    0x60b7448aa1187734 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemModifySecureHeapPhysicalRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemModifySecureHeapPhysicalRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ModifySecureHeapPhysicalRange {
                            payload: req,
                            responder: SecureMemModifySecureHeapPhysicalRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // ZeroSubRange.
                    0x5b25b7901a385ce5 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            SecureMemZeroSubRangeRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<SecureMemZeroSubRangeRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = SecureMemControlHandle { inner: this.inner.clone() };
                        Ok(SecureMemRequest::ZeroSubRange {
                            payload: req,
                            responder: SecureMemZeroSubRangeResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    // Unknown flexible one-way method (tx_id == 0): surface it
                    // to the server implementation as _UnknownMethod.
                    _ if header.tx_id == 0
                        && header
                            .dynamic_flags()
                            .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::OneWay,
                        })
                    }
                    // Unknown flexible two-way method: reply with the framework
                    // UnknownMethod error, then surface it as _UnknownMethod.
                    _ if header
                        .dynamic_flags()
                        .contains(fidl::encoding::DynamicFlags::FLEXIBLE) =>
                    {
                        this.inner.send_framework_err(
                            fidl::encoding::FrameworkErr::UnknownMethod,
                            header.tx_id,
                            header.ordinal,
                            header.dynamic_flags(),
                            (bytes, handles),
                        )?;
                        Ok(SecureMemRequest::_UnknownMethod {
                            ordinal: header.ordinal,
                            control_handle: SecureMemControlHandle { inner: this.inner.clone() },
                            method_type: fidl::MethodType::TwoWay,
                        })
                    }
                    // Unknown strict method: protocol error.
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <SecureMemMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
15940
15941/// SecureMem
15942///
15943/// The client is sysmem.  The server is securemem driver.
15944///
15945/// TEE - Trusted Execution Environment.
15946///
15947/// REE - Rich Execution Environment.
15948///
15949/// Enables sysmem to call the securemem driver to get any secure heaps
15950/// configured via the TEE (or via the securemem driver), and set any physical
15951/// secure heaps configured via sysmem.
15952///
15953/// Presently, dynamically-allocated secure heaps are configured via sysmem, as
15954/// it starts quite early during boot and can successfully reserve contiguous
15955/// physical memory.  Presently, fixed-location secure heaps are configured via
15956/// TEE, as the plumbing goes from the bootloader to the TEE.  However, this
15957/// protocol intentionally doesn't care which heaps are dynamically-allocated
15958/// and which are fixed-location.
#[derive(Debug)]
pub enum SecureMemRequest {
    /// Gets the physical address and length of any secure heap whose physical
    /// range is configured via the TEE.
    ///
    /// Presently, these will be fixed physical addresses and lengths, with the
    /// location plumbed via the TEE.
    ///
    /// This is preferred over ['fuchsia.hardware.sysmem.Sysmem/RegisterHeap']
    /// when there isn't any special heap-specific per-VMO setup or teardown
    /// required.
    ///
    /// The physical range must be secured/protected by the TEE before the
    /// securemem driver responds to this request with success.
    ///
    /// Sysmem should only call this once.  Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetPhysicalSecureHeaps { responder: SecureMemGetPhysicalSecureHeapsResponder },
    /// Gets information about any secure heaps whose physical pages are not
    /// configured by the TEE, but by sysmem.
    ///
    /// Sysmem should only call this once. Returning zero heaps is not a
    /// failure.
    ///
    /// Errors:
    ///  * PROTOCOL_DEVIATION - called more than once.
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetDynamicSecureHeaps { responder: SecureMemGetDynamicSecureHeapsResponder },
    /// This request from sysmem to the securemem driver gets the properties of
    /// a protected/secure heap.
    ///
    /// This only handles heaps with a single contiguous physical extent.
    ///
    /// The heap's entire physical range is indicated in case this request needs
    /// some physical space to auto-detect how many ranges are REE-usable.  Any
    /// temporary HW protection ranges will be deleted before this request
    /// completes.
    ///
    /// Errors:
    ///  * UNSPECIFIED - generic internal error (such as in communication
    ///    with TEE which doesn't generate zx_status_t errors).
    ///  * other errors are allowed; any other errors should be treated the same
    ///    as UNSPECIFIED.
    GetPhysicalSecureHeapProperties {
        payload: SecureMemGetPhysicalSecureHeapPropertiesRequest,
        responder: SecureMemGetPhysicalSecureHeapPropertiesResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to add, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as protected
    /// before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// created.
    ///
    /// Sysmem must only call this up to once if dynamic_protection_ranges
    /// false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this multiple
    /// times as long as the current number of ranges never exceeds
    /// max_protected_range_count.
    ///
    /// The caller must not attempt to add a range that matches an
    /// already-existing range.  Added ranges can overlap each other as long as
    /// no two ranges match exactly.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called more than once when
    ///     !dynamic_protection_ranges.  Adding a heap that would cause overall
    ///     heap count to exceed max_protected_range_count. Unexpected heap, or
    ///     range that doesn't conform to protected_range_granularity. See log.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    AddSecureHeapPhysicalRange {
        payload: SecureMemAddSecureHeapPhysicalRangeRequest,
        responder: SecureMemAddSecureHeapPhysicalRangeResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to delete, for a heap whose physical range(s) are set up via
    /// sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure all the covered offsets as not
    /// protected before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the protected range was not
    /// deleted.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// If any portion of the range being deleted is not also covered by another
    /// protected range, then any ongoing DMA to any part of the entire range
    /// may be interrupted / may fail, potentially in a way that's disruptive to
    /// the entire system (bus lockup or similar, depending on device details).
    /// Therefore, the caller must ensure that no ongoing DMA is occurring to
    /// any portion of the range being deleted, unless the caller has other
    /// active ranges covering every block of the range being deleted.  Ongoing
    /// DMA to/from blocks outside the range being deleted is never impacted by
    /// the deletion.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or range that doesn't conform to
    ///     protected_range_granularity.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    DeleteSecureHeapPhysicalRange {
        payload: SecureMemDeleteSecureHeapPhysicalRangeRequest,
        responder: SecureMemDeleteSecureHeapPhysicalRangeResponder,
    },
    /// This request from sysmem to the securemem driver conveys a physical
    /// range to modify and its new base and length, for a heap whose physical
    /// range(s) are set up via sysmem.
    ///
    /// Only sysmem can call this because only sysmem is handed the client end
    /// of a FIDL channel serving this protocol, via RegisterSecureMem().  The
    /// securemem driver is the server end of this protocol.
    ///
    /// The securemem driver must configure the range to cover only the new
    /// offsets before responding to this message with success.
    ///
    /// On failure, the securemem driver must ensure the range was not changed.
    ///
    /// Sysmem must not call this if dynamic_protection_ranges false.  Sysmem
    /// must not call this if !is_mod_protected_range_available.
    ///
    /// If dynamic_protection_ranges is true, sysmem can call this repeatedly,
    /// on various ranges that exist at the time of the call.
    ///
    /// The range must only be modified at one end or the other, but not both.
    /// If the range is getting shorter, and the un-covered blocks are not
    /// covered by other active ranges, any ongoing DMA to the entire range
    /// that's getting shorter may fail in a way that disrupts the entire system
    /// (bus lockup or similar), so the caller must ensure that no DMA is
    /// ongoing to any portion of a range that is getting shorter, unless the
    /// blocks being un-covered by the modification to this range are all
    /// covered by other active ranges, in which case no disruption to ongoing
    /// DMA will occur.
    ///
    /// If a range is modified to become <= zero length, the range is deleted.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap, or old_range or new_range that doesn't conform to
    ///     protected_range_granularity, or old_range and new_range differ in
    ///     both begin and end (disallowed).
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * NOT_FOUND - the specified range is not found.
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    ModifySecureHeapPhysicalRange {
        payload: SecureMemModifySecureHeapPhysicalRangeRequest,
        responder: SecureMemModifySecureHeapPhysicalRangeResponder,
    },
    /// Zero a sub-range of a currently-existing physical range added via
    /// AddSecureHeapPhysicalRange().  The sub-range must be fully covered by
    /// exactly one physical range, and must not overlap with any other
    /// physical range.
    ///
    /// is_covering_range_explicit - When true, the covering range must be one
    ///     of the ranges explicitly created via AddSecureHeapPhysicalRange(),
    ///     possibly modified since.  When false, the covering range must not
    ///     be one of the ranges explicitly created via
    ///     AddSecureHeapPhysicalRange(), but the covering range must exist as
    ///     a covering range not created via AddSecureHeapPhysicalRange().  The
    ///     covering range is typically the entire physical range (or a range
    ///     which covers even more) of a heap configured by the TEE and whose
    ///     configuration is conveyed to sysmem via GetPhysicalSecureHeaps().
    ///
    /// Ongoing DMA is not disrupted by this request.
    ///
    /// Errors:
    ///   * PROTOCOL_DEVIATION - called when !dynamic_protection_ranges.
    ///     Unexpected heap.
    ///   * UNSPECIFIED - generic internal error (such as in communication
    ///     with TEE which doesn't generate zx_status_t errors).
    ///   * other errors are possible, such as from communication failures or
    ///     server propagation of failures.
    ZeroSubRange {
        payload: SecureMemZeroSubRangeRequest,
        responder: SecureMemZeroSubRangeResponder,
    },
    /// An interaction was received which does not match any known method.
    #[non_exhaustive]
    _UnknownMethod {
        /// Ordinal of the method that was called.
        ordinal: u64,
        control_handle: SecureMemControlHandle,
        method_type: fidl::MethodType,
    },
}
16178
16179impl SecureMemRequest {
16180    #[allow(irrefutable_let_patterns)]
16181    pub fn into_get_physical_secure_heaps(
16182        self,
16183    ) -> Option<(SecureMemGetPhysicalSecureHeapsResponder)> {
16184        if let SecureMemRequest::GetPhysicalSecureHeaps { responder } = self {
16185            Some((responder))
16186        } else {
16187            None
16188        }
16189    }
16190
16191    #[allow(irrefutable_let_patterns)]
16192    pub fn into_get_dynamic_secure_heaps(
16193        self,
16194    ) -> Option<(SecureMemGetDynamicSecureHeapsResponder)> {
16195        if let SecureMemRequest::GetDynamicSecureHeaps { responder } = self {
16196            Some((responder))
16197        } else {
16198            None
16199        }
16200    }
16201
16202    #[allow(irrefutable_let_patterns)]
16203    pub fn into_get_physical_secure_heap_properties(
16204        self,
16205    ) -> Option<(
16206        SecureMemGetPhysicalSecureHeapPropertiesRequest,
16207        SecureMemGetPhysicalSecureHeapPropertiesResponder,
16208    )> {
16209        if let SecureMemRequest::GetPhysicalSecureHeapProperties { payload, responder } = self {
16210            Some((payload, responder))
16211        } else {
16212            None
16213        }
16214    }
16215
16216    #[allow(irrefutable_let_patterns)]
16217    pub fn into_add_secure_heap_physical_range(
16218        self,
16219    ) -> Option<(
16220        SecureMemAddSecureHeapPhysicalRangeRequest,
16221        SecureMemAddSecureHeapPhysicalRangeResponder,
16222    )> {
16223        if let SecureMemRequest::AddSecureHeapPhysicalRange { payload, responder } = self {
16224            Some((payload, responder))
16225        } else {
16226            None
16227        }
16228    }
16229
16230    #[allow(irrefutable_let_patterns)]
16231    pub fn into_delete_secure_heap_physical_range(
16232        self,
16233    ) -> Option<(
16234        SecureMemDeleteSecureHeapPhysicalRangeRequest,
16235        SecureMemDeleteSecureHeapPhysicalRangeResponder,
16236    )> {
16237        if let SecureMemRequest::DeleteSecureHeapPhysicalRange { payload, responder } = self {
16238            Some((payload, responder))
16239        } else {
16240            None
16241        }
16242    }
16243
16244    #[allow(irrefutable_let_patterns)]
16245    pub fn into_modify_secure_heap_physical_range(
16246        self,
16247    ) -> Option<(
16248        SecureMemModifySecureHeapPhysicalRangeRequest,
16249        SecureMemModifySecureHeapPhysicalRangeResponder,
16250    )> {
16251        if let SecureMemRequest::ModifySecureHeapPhysicalRange { payload, responder } = self {
16252            Some((payload, responder))
16253        } else {
16254            None
16255        }
16256    }
16257
16258    #[allow(irrefutable_let_patterns)]
16259    pub fn into_zero_sub_range(
16260        self,
16261    ) -> Option<(SecureMemZeroSubRangeRequest, SecureMemZeroSubRangeResponder)> {
16262        if let SecureMemRequest::ZeroSubRange { payload, responder } = self {
16263            Some((payload, responder))
16264        } else {
16265            None
16266        }
16267    }
16268
16269    /// Name of the method defined in FIDL
16270    pub fn method_name(&self) -> &'static str {
16271        match *self {
16272            SecureMemRequest::GetPhysicalSecureHeaps { .. } => "get_physical_secure_heaps",
16273            SecureMemRequest::GetDynamicSecureHeaps { .. } => "get_dynamic_secure_heaps",
16274            SecureMemRequest::GetPhysicalSecureHeapProperties { .. } => {
16275                "get_physical_secure_heap_properties"
16276            }
16277            SecureMemRequest::AddSecureHeapPhysicalRange { .. } => "add_secure_heap_physical_range",
16278            SecureMemRequest::DeleteSecureHeapPhysicalRange { .. } => {
16279                "delete_secure_heap_physical_range"
16280            }
16281            SecureMemRequest::ModifySecureHeapPhysicalRange { .. } => {
16282                "modify_secure_heap_physical_range"
16283            }
16284            SecureMemRequest::ZeroSubRange { .. } => "zero_sub_range",
16285            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::OneWay, .. } => {
16286                "unknown one-way method"
16287            }
16288            SecureMemRequest::_UnknownMethod { method_type: fidl::MethodType::TwoWay, .. } => {
16289                "unknown two-way method"
16290            }
16291        }
16292    }
16293}
16294
/// Server-side control handle for the `SecureMem` protocol; a cheap, cloneable
/// wrapper around the shared channel-serving state.
#[derive(Debug, Clone)]
pub struct SecureMemControlHandle {
    // Shared serving state; every clone refers to the same underlying channel.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
16299
// All methods delegate straight to the shared `ServeInner` state.
impl fidl::endpoints::ControlHandle for SecureMemControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Signaling the peer requires a real zircon channel, hence fuchsia-only.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
16325
16326impl SecureMemControlHandle {}
16327
/// Responder for the `SecureMem.GetPhysicalSecureHeaps` two-way method;
/// carries the transaction id needed to send the reply on the server channel.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapsResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running the shutdown performed by the Drop impl below.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; does not consume `self`.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal (generated by fidlgen).
            0x38716300592073e3,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16402
/// Responder for the `SecureMem.GetDynamicSecureHeaps` two-way method;
/// carries the transaction id needed to send the reply on the server channel.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetDynamicSecureHeapsResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running the shutdown performed by the Drop impl below.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetDynamicSecureHeapsResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetDynamicSecureHeapsResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetDynamicSecureHeapsResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; does not consume `self`.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetDynamicSecureHeapsResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetDynamicSecureHeapsResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal (generated by fidlgen).
            0x1190847f99952834,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16477
/// Responder for the `SecureMem.GetPhysicalSecureHeapProperties` two-way
/// method; carries the transaction id needed to reply on the server channel.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemGetPhysicalSecureHeapPropertiesResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running the shutdown performed by the Drop impl below.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemGetPhysicalSecureHeapPropertiesResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemGetPhysicalSecureHeapPropertiesResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(
        self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; does not consume `self`.
    fn send_raw(
        &self,
        mut result: Result<&SecureMemGetPhysicalSecureHeapPropertiesResponse, Error>,
    ) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            SecureMemGetPhysicalSecureHeapPropertiesResponse,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal (generated by fidlgen).
            0xc6f06889009c7bc,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16552
/// Responder for the `SecureMem.AddSecureHeapPhysicalRange` two-way method;
/// the success reply carries no payload (empty struct on the wire).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemAddSecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running the shutdown performed by the Drop impl below.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemAddSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemAddSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemAddSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; does not consume `self`.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal (generated by fidlgen).
            0x35f695b9b6c7217a,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16618
/// Responder for the `SecureMem.DeleteSecureHeapPhysicalRange` two-way method;
/// the success reply carries no payload (empty struct on the wire).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemDeleteSecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running the shutdown performed by the Drop impl below.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemDeleteSecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemDeleteSecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; does not consume `self`.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal (generated by fidlgen).
            0xeaa58c650264c9e,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16684
/// Responder for the `SecureMem.ModifySecureHeapPhysicalRange` two-way method;
/// the success reply carries no payload (empty struct on the wire).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemModifySecureHeapPhysicalRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running the shutdown performed by the Drop impl below.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemModifySecureHeapPhysicalRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemModifySecureHeapPhysicalRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemModifySecureHeapPhysicalRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; does not consume `self`.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal (generated by fidlgen).
            0x60b7448aa1187734,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16750
/// Responder for the `SecureMem.ZeroSubRange` two-way method;
/// the success reply carries no payload (empty struct on the wire).
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct SecureMemZeroSubRangeResponder {
    // ManuallyDrop lets `drop_without_shutdown` release the handle without
    // running the shutdown performed by the Drop impl below.
    control_handle: std::mem::ManuallyDrop<SecureMemControlHandle>,
    // Transaction id of the request being answered.
    tx_id: u32,
}

/// Sets the channel to be shutdown (see [`SecureMemControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for SecureMemZeroSubRangeResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for SecureMemZeroSubRangeResponder {
    type ControlHandle = SecureMemControlHandle;

    fn control_handle(&self) -> &SecureMemControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl SecureMemZeroSubRangeResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        let _result = self.send_raw(result);
        self.drop_without_shutdown();
        _result
    }

    /// Encodes and writes the reply message; does not consume `self`.
    fn send_raw(&self, mut result: Result<(), Error>) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::FlexibleResultType<
            fidl::encoding::EmptyStruct,
            Error,
        >>(
            fidl::encoding::FlexibleResult::new(result),
            self.tx_id,
            // Method ordinal (generated by fidlgen).
            0x5b25b7901a385ce5,
            fidl::encoding::DynamicFlags::FLEXIBLE,
        )
    }
}
16816
16817mod internal {
16818    use super::*;
16819
    // FIDL table codec machinery for `AllocatorAllocateNonSharedCollectionRequest`
    // (a resource table with a single handle-carrying field at ordinal 1:
    // `collection_request`).
    impl AllocatorAllocateNonSharedCollectionRequest {
        // Highest table ordinal whose field is currently set; 0 when no
        // fields are present (used to size the envelope vector on encode).
        #[inline(always)]
        fn max_ordinal_present(&self) -> u64 {
            if let Some(_) = self.collection_request {
                return 1;
            }
            0
        }
    }

    // Resource types are encoded by mutable borrow so handles can be moved
    // out of the value during encoding.
    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateNonSharedCollectionRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        // 16 bytes inline: the table's vector header (count + presence),
        // written below as two u64s at `offset` and `offset + 8`.
        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }

    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateNonSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateNonSharedCollectionRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateNonSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }

    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorAllocateNonSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the envelope count from the table's vector header; the
            // header must be non-null (tables are never nullable here).
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // A value at most 4 bytes is stored inline in the envelope;
                // the header's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.collection_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // was actually consumed while decoding the member.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17014
17015    impl AllocatorAllocateSharedCollectionRequest {
17016        #[inline(always)]
17017        fn max_ordinal_present(&self) -> u64 {
17018            if let Some(_) = self.token_request {
17019                return 1;
17020            }
17021            0
17022        }
17023    }
17024
    // Marks this table as a resource type: it is borrowed *mutably* for
    // encoding so the contained handle can be moved out rather than copied.
    impl fidl::encoding::ResourceTypeMarker for AllocatorAllocateSharedCollectionRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17033
    // Wire-format metadata: this table occupies 16 inline bytes (a vector
    // header) at 8-byte alignment; members are stored out-of-line in envelopes.
    unsafe impl fidl::encoding::TypeMarker for AllocatorAllocateSharedCollectionRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
17047
    // Encodes the table as an envelope vector: a 16-byte inline header
    // (max ordinal + ALLOC_PRESENT), then one 8-byte envelope per ordinal
    // out-of-line, up to the highest ordinal present (ordinal 1 = token_request).
    unsafe impl
        fidl::encoding::Encode<
            AllocatorAllocateSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorAllocateSharedCollectionRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorAllocateSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // take_or_borrow hands the server-end handle to the envelope writer
            // by mutable borrow so ownership can be transferred during encode.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17112
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorAllocateSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Decodes the envelope vector produced by `encode`: reads the vector
        // header, walks one 8-byte envelope per ordinal, decoding the known
        // member (ordinal 1 = token_request) and skipping unknown ordinals so
        // tables from newer peers are tolerated.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Envelopes inline members of <= 4 bytes; the header's inline
                // bit must agree with the member's actual size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // the member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17220
17221    impl AllocatorBindSharedCollectionRequest {
17222        #[inline(always)]
17223        fn max_ordinal_present(&self) -> u64 {
17224            if let Some(_) = self.buffer_collection_request {
17225                return 2;
17226            }
17227            if let Some(_) = self.token {
17228                return 1;
17229            }
17230            0
17231        }
17232    }
17233
    // Marks this table as a resource type: borrowed mutably for encoding so
    // the contained channel handles can be moved out rather than copied.
    impl fidl::encoding::ResourceTypeMarker for AllocatorBindSharedCollectionRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17242
    // Wire-format metadata: 16 inline bytes (vector header), 8-byte aligned;
    // table members are stored out-of-line in envelopes.
    unsafe impl fidl::encoding::TypeMarker for AllocatorBindSharedCollectionRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
17256
    // Encodes the table as an envelope vector: header inline, then one 8-byte
    // envelope per ordinal (1 = token, 2 = buffer_collection_request).
    // Encoding stops after the highest ordinal present.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorBindSharedCollectionRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorBindSharedCollectionRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorBindSharedCollectionRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffer_collection_request.as_mut().map(<fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17342
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorBindSharedCollectionRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Decodes the envelope vector produced by `encode`: ordinal 1 = token
        // (client end), ordinal 2 = buffer_collection_request (server end).
        // Unknown ordinals are skipped for table forward compatibility.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // the member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17510
17511    impl AllocatorGetVmoInfoRequest {
17512        #[inline(always)]
17513        fn max_ordinal_present(&self) -> u64 {
17514            if let Some(_) = self.vmo {
17515                return 1;
17516            }
17517            0
17518        }
17519    }
17520
    // Marks this table as a resource type: borrowed mutably for encoding so
    // the contained VMO handle can be moved out rather than copied.
    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17529
    // Wire-format metadata: 16 inline bytes (vector header), 8-byte aligned;
    // table members are stored out-of-line in envelopes.
    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
17543
    // Encodes the table as an envelope vector; ordinal 1 carries the VMO
    // handle. The handle is taken (moved) out of the table during encoding.
    unsafe impl
        fidl::encoding::Encode<
            AllocatorGetVmoInfoRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorGetVmoInfoRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorGetVmoInfoRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // NOTE(review): 2147483648 == 0x8000_0000 — looks like the
            // same-rights sentinel for handle rights; confirm against
            // zx handle-rights constants.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Vmo,
                        { fidl::ObjectType::VMO.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17614
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorGetVmoInfoRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Decodes the envelope vector produced by `encode`: ordinal 1 is the
        // VMO handle; unknown ordinals are skipped for table forward
        // compatibility.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope's declared byte/handle counts match what
                // the member decode actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
17709
17710    impl AllocatorGetVmoInfoResponse {
17711        #[inline(always)]
17712        fn max_ordinal_present(&self) -> u64 {
17713            if let Some(_) = self.close_weak_asap {
17714                return 3;
17715            }
17716            if let Some(_) = self.buffer_index {
17717                return 2;
17718            }
17719            if let Some(_) = self.buffer_collection_id {
17720                return 1;
17721            }
17722            0
17723        }
17724    }
17725
    // Marks this table as a resource type: borrowed mutably for encoding so
    // any contained handles can be moved out rather than copied.
    impl fidl::encoding::ResourceTypeMarker for AllocatorGetVmoInfoResponse {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
17734
    // Wire-format metadata: 16 inline bytes (vector header), 8-byte aligned;
    // table members are stored out-of-line in envelopes.
    unsafe impl fidl::encoding::TypeMarker for AllocatorGetVmoInfoResponse {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
17748
    unsafe impl
        fidl::encoding::Encode<
            AllocatorGetVmoInfoResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut AllocatorGetVmoInfoResponse
    {
        /// Encodes the table as a FIDL envelope vector: a 16-byte vector
        /// header inline, then one 8-byte envelope per ordinal up to
        /// `max_ordinal_present()` out of line. Absent fields below the max
        /// ordinal are written as empty envelopes via padding.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<AllocatorGetVmoInfoResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Ordinal 1: buffer_collection_id (u64, value type).
            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_id
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Ordinal 2: buffer_index (u64, value type).
            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_index.as_ref().map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Ordinal 3: close_weak_asap (eventpair handle, resource type —
            // the handle is moved out of `self` via take_or_borrow).
            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.close_weak_asap.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
17873
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for AllocatorGetVmoInfoResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        /// Decodes the table's envelope vector in ordinal order. Envelopes
        /// for ordinals this binding does not know (gaps or trailing) are
        /// consumed as unknown envelopes, which preserves forward
        /// compatibility with newer peers.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are non-nullable; an absent vector body is a protocol error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: buffer_collection_id (u64).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // The inline bit must match the member size (<= 4 bytes is inlined).
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope's declared byte/handle counts match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: buffer_index (u64).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_index.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 3: close_weak_asap (eventpair handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18072
18073    impl BufferCollectionAttachLifetimeTrackingRequest {
18074        #[inline(always)]
18075        fn max_ordinal_present(&self) -> u64 {
18076            if let Some(_) = self.buffers_remaining {
18077                return 2;
18078            }
18079            if let Some(_) = self.server_end {
18080                return 1;
18081            }
18082            0
18083        }
18084    }
18085
    // Marks this table as a resource type: it contains a handle field
    // (`server_end`), so encoding borrows `&mut Self` and moves handles out
    // of the value rather than copying them.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // The mutable borrow is returned as-is; handle fields are taken
            // individually during encode.
            value
        }
    }
18094
    // Wire-format metadata for the table. Values are fixed by the FIDL wire
    // format, not by the fields of this particular table.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachLifetimeTrackingRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned (they are encoded as a vector of envelopes).
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline part is the 16-byte vector header: 8-byte element count
            // plus 8-byte presence/pointer word.
            16
        }
    }
18108
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionAttachLifetimeTrackingRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionAttachLifetimeTrackingRequest
    {
        /// Encodes the table as a FIDL envelope vector: a 16-byte vector
        /// header inline, then one 8-byte envelope per ordinal up to
        /// `max_ordinal_present()` out of line. Absent fields below the max
        /// ordinal are written as empty envelopes via padding.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionAttachLifetimeTrackingRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Ordinal 1: server_end (eventpair handle, resource type — the
            // handle is moved out of `self` via take_or_borrow).
            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.server_end.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Ordinal 2: buffers_remaining (u32, value type).
            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                u32,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffers_remaining
                    .as_ref()
                    .map(<u32 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18207
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionAttachLifetimeTrackingRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        /// Decodes the table's envelope vector in ordinal order. Envelopes
        /// for ordinals this binding does not know (gaps or trailing) are
        /// consumed as unknown envelopes, which preserves forward
        /// compatibility with newer peers.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are non-nullable; an absent vector body is a protocol error.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            // Ordinal 1: server_end (eventpair handle).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // The inline bit must match the member size (<= 4 bytes is inlined).
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope's declared byte/handle counts match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            // Ordinal 2: buffers_remaining (u32).
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u32 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffers_remaining.get_or_insert_with(|| {
                    fidl::new_empty!(u32, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u32,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18354
18355    impl BufferCollectionAttachTokenRequest {
18356        #[inline(always)]
18357        fn max_ordinal_present(&self) -> u64 {
18358            if let Some(_) = self.token_request {
18359                return 2;
18360            }
18361            if let Some(_) = self.rights_attenuation_mask {
18362                return 1;
18363            }
18364            0
18365        }
18366    }
18367
    // Marks this table as a resource type: it contains a channel endpoint
    // (`token_request`), so encoding borrows `&mut Self` and moves handles
    // out of the value rather than copying them.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionAttachTokenRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // The mutable borrow is returned as-is; handle fields are taken
            // individually during encode.
            value
        }
    }
18376
    // Wire-format metadata for the table. Values are fixed by the FIDL wire
    // format, not by the fields of this particular table.
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionAttachTokenRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned (they are encoded as a vector of envelopes).
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline part is the 16-byte vector header: 8-byte element count
            // plus 8-byte presence/pointer word.
            16
        }
    }
18390
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionAttachTokenRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionAttachTokenRequest
    {
        /// Encodes the table as a FIDL envelope vector: a 16-byte vector
        /// header inline, then one 8-byte envelope per ordinal up to
        /// `max_ordinal_present()` out of line. Absent fields below the max
        /// ordinal are written as empty envelopes via padding.
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionAttachTokenRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Ordinal 1: rights_attenuation_mask (fidl::Rights bitmask, value type).
            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Ordinal 2: token_request (server-end channel endpoint, resource
            // type — the handle is moved out of `self` via take_or_borrow).
            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18483
    // Manual FIDL decoder for the `BufferCollectionAttachTokenRequest` table.
    // Wire layout: a vector header (member count + presence marker) followed
    // out-of-line by one 8-byte envelope per ordinal:
    //   ordinal 1: rights_attenuation_mask (fidl::Rights, plain value)
    //   ordinal 2: token_request (server endpoint; a resource)
    // Envelopes for ordinals this build does not know are skipped as unknown,
    // which keeps the decoder forward-compatible with newer senders.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionAttachTokenRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // All members are optional, so the empty table is just the default.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the highest ordinal the sender encoded; a null vector
            // header is invalid because the table itself is not nullable here.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: rights_attenuation_mask.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Values of at most 4 bytes are stored inline in the envelope;
                // larger ones live out of line. The sender's choice must match
                // the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    fidl::Rights,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Cross-check the envelope's advertised byte/handle counts
                // against what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: token_request (takes a handle from the decoder).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Cross-check the envelope's advertised byte/handle counts
                // against what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18643
18644    impl BufferCollectionInfo {
18645        #[inline(always)]
18646        fn max_ordinal_present(&self) -> u64 {
18647            if let Some(_) = self.buffer_collection_id {
18648                return 3;
18649            }
18650            if let Some(_) = self.buffers {
18651                return 2;
18652            }
18653            if let Some(_) = self.settings {
18654                return 1;
18655            }
18656            0
18657        }
18658    }
18659
18660    impl fidl::encoding::ResourceTypeMarker for BufferCollectionInfo {
18661        type Borrowed<'a> = &'a mut Self;
18662        fn take_or_borrow<'a>(
18663            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
18664        ) -> Self::Borrowed<'a> {
18665            value
18666        }
18667    }
18668
18669    unsafe impl fidl::encoding::TypeMarker for BufferCollectionInfo {
18670        type Owned = Self;
18671
18672        #[inline(always)]
18673        fn inline_align(_context: fidl::encoding::Context) -> usize {
18674            8
18675        }
18676
18677        #[inline(always)]
18678        fn inline_size(_context: fidl::encoding::Context) -> usize {
18679            16
18680        }
18681    }
18682
    // Manual FIDL encoder for the `BufferCollectionInfo` resource table.
    // Wire layout: a 16-byte vector header (max populated ordinal + presence
    // marker), then one 8-byte envelope per ordinal out of line:
    //   ordinal 1: settings (value; borrowed)
    //   ordinal 2: buffers (resource; handles are moved out via take_or_borrow)
    //   ordinal 3: buffer_collection_id (u64 value; borrowed)
    // Encoding from `&mut` is what allows ordinal 2 to transfer its handles.
    unsafe impl
        fidl::encoding::Encode<BufferCollectionInfo, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut BufferCollectionInfo
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionInfo>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            // Reserve one 8-byte envelope per ordinal up to the highest set
            // one; members past max_ordinal are simply absent on the wire.
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: settings.
            fidl::encoding::encode_in_envelope_optional::<
                SingleBufferSettings,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.settings
                    .as_ref()
                    .map(<SingleBufferSettings as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: buffers. take_or_borrow moves the handles out of
            // `self`, which is why this impl encodes from `&mut`.
            fidl::encoding::encode_in_envelope_optional::<fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect>(
            self.buffers.as_mut().map(<fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::ResourceTypeMarker>::take_or_borrow),
            encoder, offset + cur_offset, depth
        )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 3: buffer_collection_id.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_id
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
18792
    // Manual FIDL decoder for the `BufferCollectionInfo` resource table.
    // Wire layout: a vector header (member count + presence marker) followed
    // out-of-line by one 8-byte envelope per ordinal:
    //   ordinal 1: settings (SingleBufferSettings value)
    //   ordinal 2: buffers (vector of up to 128 VmoBuffer elements; a resource)
    //   ordinal 3: buffer_collection_id (u64)
    // Envelopes for ordinals this build does not know are skipped as unknown,
    // keeping the decoder forward-compatible with newer senders.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionInfo
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // All members are optional, so the empty table is just the default.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the highest ordinal the sender encoded; a null vector
            // header is invalid because the table itself is not nullable here.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: settings.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <SingleBufferSettings as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Values of at most 4 bytes are stored inline in the envelope;
                // larger ones live out of line. The sender's choice must match
                // the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.settings.get_or_insert_with(|| {
                    fidl::new_empty!(
                        SingleBufferSettings,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    SingleBufferSettings,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Cross-check the envelope's advertised byte/handle counts
                // against what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: buffers (takes handles from the decoder).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Vector<VmoBuffer, 128> as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.buffers.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::Vector<VmoBuffer, 128>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Cross-check the envelope's advertised byte/handle counts
                // against what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 3: buffer_collection_id.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_id.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Cross-check the envelope's advertised byte/handle counts
                // against what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
18990
18991    impl BufferCollectionSetConstraintsRequest {
18992        #[inline(always)]
18993        fn max_ordinal_present(&self) -> u64 {
18994            if let Some(_) = self.constraints {
18995                return 1;
18996            }
18997            0
18998        }
18999    }
19000
19001    impl fidl::encoding::ResourceTypeMarker for BufferCollectionSetConstraintsRequest {
19002        type Borrowed<'a> = &'a mut Self;
19003        fn take_or_borrow<'a>(
19004            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
19005        ) -> Self::Borrowed<'a> {
19006            value
19007        }
19008    }
19009
19010    unsafe impl fidl::encoding::TypeMarker for BufferCollectionSetConstraintsRequest {
19011        type Owned = Self;
19012
19013        #[inline(always)]
19014        fn inline_align(_context: fidl::encoding::Context) -> usize {
19015            8
19016        }
19017
19018        #[inline(always)]
19019        fn inline_size(_context: fidl::encoding::Context) -> usize {
19020            16
19021        }
19022    }
19023
    // Manual FIDL encoder for the `BufferCollectionSetConstraintsRequest`
    // table: a 16-byte vector header (max populated ordinal + presence
    // marker), then one 8-byte envelope per ordinal out of line. Only
    // ordinal 1 (`constraints`) exists; it is a plain value member, so it is
    // borrowed rather than moved.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionSetConstraintsRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionSetConstraintsRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionSetConstraintsRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            // Reserve one 8-byte envelope per ordinal up to the highest set
            // one; members past max_ordinal are simply absent on the wire.
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: constraints.
            fidl::encoding::encode_in_envelope_optional::<
                BufferCollectionConstraints,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.constraints
                    .as_ref()
                    .map(<BufferCollectionConstraints as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19086
    // Manual FIDL decoder for the `BufferCollectionSetConstraintsRequest`
    // table. Wire layout: a vector header (member count + presence marker)
    // followed out-of-line by one 8-byte envelope per ordinal; only ordinal 1
    // (`constraints`) is known. Envelopes for higher ordinals are skipped as
    // unknown, keeping the decoder forward-compatible with newer senders.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionSetConstraintsRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            // All members are optional, so the empty table is just the default.
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the highest ordinal the sender encoded; a null vector
            // header is invalid because the table itself is not nullable here.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: constraints.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <BufferCollectionConstraints as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Values of at most 4 bytes are stored inline in the envelope;
                // larger ones live out of line. The sender's choice must match
                // the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.constraints.get_or_insert_with(|| {
                    fidl::new_empty!(
                        BufferCollectionConstraints,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    BufferCollectionConstraints,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Cross-check the envelope's advertised byte/handle counts
                // against what decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19189
19190    impl BufferCollectionTokenCreateBufferCollectionTokenGroupRequest {
19191        #[inline(always)]
19192        fn max_ordinal_present(&self) -> u64 {
19193            if let Some(_) = self.group_request {
19194                return 1;
19195            }
19196            0
19197        }
19198    }
19199
    // Marks this table as a FIDL *resource* type: it contains a Zircon handle
    // member, so encoding borrows it mutably (handles are moved out during
    // encode rather than copied, unlike plain value types).
    impl fidl::encoding::ResourceTypeMarker
        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        type Borrowed<'a> = &'a mut Self;
        // Hands the encoder a mutable borrow; the handle field is taken out
        // of `value` by the encode path (see the Encode impl below).
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
19210
    // Wire-format metadata for this table. A FIDL table's inline part is a
    // 16-byte, 8-byte-aligned vector header (max ordinal count + presence
    // marker); the per-field envelopes live out of line.
    // SAFETY: the sizes reported here must match exactly what the Encode and
    // Decode impls read/write, which they do (generated together).
    unsafe impl fidl::encoding::TypeMarker
        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19226
    // Encodes the table onto the wire: a 16-byte vector header inline, then
    // one 8-byte envelope per ordinal up to the highest present field. The
    // `group_request` server end (ordinal 1) is a handle, so it is taken out
    // of `self` via `take_or_borrow` during encoding.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenCreateBufferCollectionTokenGroupRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder
                .debug_check_bounds::<BufferCollectionTokenCreateBufferCollectionTokenGroupRequest>(
                    offset,
                );
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `group_request` — absent members encode as a zero envelope.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.group_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19296
    // Decodes the table from the wire: reads the vector header, then walks
    // the out-of-line envelope array in ordinal order. Unknown (higher or
    // gap) ordinals are skipped via `decode_unknown_envelope`, which is what
    // makes tables forward/backward compatible.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenCreateBufferCollectionTokenGroupRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are non-nullable vectors of envelopes on the wire.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot out-of-line position and handle count so the envelope's
            // declared num_bytes/num_handles can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Payloads of <= 4 bytes must be stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.group_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenGroupMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19404
19405    impl BufferCollectionTokenDuplicateRequest {
19406        #[inline(always)]
19407        fn max_ordinal_present(&self) -> u64 {
19408            if let Some(_) = self.token_request {
19409                return 2;
19410            }
19411            if let Some(_) = self.rights_attenuation_mask {
19412                return 1;
19413            }
19414            0
19415        }
19416    }
19417
    // Marks this table as a FIDL *resource* type: it contains a Zircon handle
    // member (`token_request`), so encoding borrows it mutably and moves the
    // handle out rather than copying it.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateRequest {
        type Borrowed<'a> = &'a mut Self;
        // Hands the encoder a mutable borrow; handle fields are taken out of
        // `value` by the encode path (see the Encode impl below).
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
19426
    // Wire-format metadata for this table. A FIDL table's inline part is a
    // 16-byte, 8-byte-aligned vector header (max ordinal count + presence
    // marker); the per-field envelopes live out of line.
    // SAFETY: the sizes reported here must match exactly what the Encode and
    // Decode impls read/write, which they do (generated together).
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19440
    // Encodes the table onto the wire: a 16-byte vector header inline, then
    // one 8-byte envelope per ordinal up to the highest present field.
    // Ordinal 1 (`rights_attenuation_mask`) is a value type, borrowed by
    // reference; ordinal 2 (`token_request`) is a handle, taken out of `self`.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenDuplicateRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenDuplicateRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `rights_attenuation_mask` (value type — borrowed).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: `token_request` (resource type — handle moved out).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19533
    // Decodes the table from the wire: reads the vector header, then walks
    // the envelope array in ordinal order (1: rights_attenuation_mask,
    // 2: token_request). Unknown/gap ordinals are skipped via
    // `decode_unknown_envelope`, preserving table evolvability.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenDuplicateRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables are non-nullable vectors of envelopes on the wire.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot out-of-line position and handle count so the envelope's
            // declared num_bytes/num_handles can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // Payloads of <= 4 bytes must be stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    fidl::Rights,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: `token_request` — same snapshot/validate pattern.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Endpoint<
                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.token_request.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
19693
19694    impl BufferCollectionTokenGroupCreateChildRequest {
19695        #[inline(always)]
19696        fn max_ordinal_present(&self) -> u64 {
19697            if let Some(_) = self.rights_attenuation_mask {
19698                return 2;
19699            }
19700            if let Some(_) = self.token_request {
19701                return 1;
19702            }
19703            0
19704        }
19705    }
19706
    // Marks this table as a FIDL *resource* type: it contains a Zircon handle
    // member (`token_request`), so encoding borrows it mutably and moves the
    // handle out rather than copying it.
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildRequest {
        type Borrowed<'a> = &'a mut Self;
        // Hands the encoder a mutable borrow; handle fields are taken out of
        // `value` by the encode path (see the Encode impl below).
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }
19715
    // Wire-format metadata for this table. A FIDL table's inline part is a
    // 16-byte, 8-byte-aligned vector header (max ordinal count + presence
    // marker); the per-field envelopes live out of line.
    // SAFETY: the sizes reported here must match exactly what the Encode and
    // Decode impls read/write, which they do (generated together).
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
19729
    // Encodes the table onto the wire: a 16-byte vector header inline, then
    // one 8-byte envelope per ordinal up to the highest present field.
    // Ordinal 1 (`token_request`) is a handle, taken out of `self`; ordinal 2
    // (`rights_attenuation_mask`) is a value type, borrowed by reference.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenGroupCreateChildRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenGroupCreateChildRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenGroupCreateChildRequest>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `token_request` (resource type — handle moved out).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Endpoint<fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.token_request.as_mut().map(
                    <fidl::encoding::Endpoint<
                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: `rights_attenuation_mask` (value type — borrowed).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::Rights,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.rights_attenuation_mask
                    .as_ref()
                    .map(<fidl::Rights as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
19822
19823    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
19824        for BufferCollectionTokenGroupCreateChildRequest
19825    {
19826        #[inline(always)]
19827        fn new_empty() -> Self {
19828            Self::default()
19829        }
19830
19831        unsafe fn decode(
19832            &mut self,
19833            decoder: &mut fidl::encoding::Decoder<
19834                '_,
19835                fidl::encoding::DefaultFuchsiaResourceDialect,
19836            >,
19837            offset: usize,
19838            mut depth: fidl::encoding::Depth,
19839        ) -> fidl::Result<()> {
19840            decoder.debug_check_bounds::<Self>(offset);
19841            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
19842                None => return Err(fidl::Error::NotNullable),
19843                Some(len) => len,
19844            };
19845            // Calling decoder.out_of_line_offset(0) is not allowed.
19846            if len == 0 {
19847                return Ok(());
19848            };
19849            depth.increment()?;
19850            let envelope_size = 8;
19851            let bytes_len = len * envelope_size;
19852            let offset = decoder.out_of_line_offset(bytes_len)?;
19853            // Decode the envelope for each type.
19854            let mut _next_ordinal_to_read = 0;
19855            let mut next_offset = offset;
19856            let end_offset = offset + bytes_len;
19857            _next_ordinal_to_read += 1;
19858            if next_offset >= end_offset {
19859                return Ok(());
19860            }
19861
19862            // Decode unknown envelopes for gaps in ordinals.
19863            while _next_ordinal_to_read < 1 {
19864                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19865                _next_ordinal_to_read += 1;
19866                next_offset += envelope_size;
19867            }
19868
19869            let next_out_of_line = decoder.next_out_of_line();
19870            let handles_before = decoder.remaining_handles();
19871            if let Some((inlined, num_bytes, num_handles)) =
19872                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19873            {
19874                let member_inline_size = <fidl::encoding::Endpoint<
19875                    fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19876                > as fidl::encoding::TypeMarker>::inline_size(
19877                    decoder.context
19878                );
19879                if inlined != (member_inline_size <= 4) {
19880                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19881                }
19882                let inner_offset;
19883                let mut inner_depth = depth.clone();
19884                if inlined {
19885                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19886                    inner_offset = next_offset;
19887                } else {
19888                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19889                    inner_depth.increment()?;
19890                }
19891                let val_ref = self.token_request.get_or_insert_with(|| {
19892                    fidl::new_empty!(
19893                        fidl::encoding::Endpoint<
19894                            fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19895                        >,
19896                        fidl::encoding::DefaultFuchsiaResourceDialect
19897                    )
19898                });
19899                fidl::decode!(
19900                    fidl::encoding::Endpoint<
19901                        fidl::endpoints::ServerEnd<BufferCollectionTokenMarker>,
19902                    >,
19903                    fidl::encoding::DefaultFuchsiaResourceDialect,
19904                    val_ref,
19905                    decoder,
19906                    inner_offset,
19907                    inner_depth
19908                )?;
19909                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19910                {
19911                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19912                }
19913                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19914                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19915                }
19916            }
19917
19918            next_offset += envelope_size;
19919            _next_ordinal_to_read += 1;
19920            if next_offset >= end_offset {
19921                return Ok(());
19922            }
19923
19924            // Decode unknown envelopes for gaps in ordinals.
19925            while _next_ordinal_to_read < 2 {
19926                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19927                _next_ordinal_to_read += 1;
19928                next_offset += envelope_size;
19929            }
19930
19931            let next_out_of_line = decoder.next_out_of_line();
19932            let handles_before = decoder.remaining_handles();
19933            if let Some((inlined, num_bytes, num_handles)) =
19934                fidl::encoding::decode_envelope_header(decoder, next_offset)?
19935            {
19936                let member_inline_size =
19937                    <fidl::Rights as fidl::encoding::TypeMarker>::inline_size(decoder.context);
19938                if inlined != (member_inline_size <= 4) {
19939                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
19940                }
19941                let inner_offset;
19942                let mut inner_depth = depth.clone();
19943                if inlined {
19944                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
19945                    inner_offset = next_offset;
19946                } else {
19947                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
19948                    inner_depth.increment()?;
19949                }
19950                let val_ref = self.rights_attenuation_mask.get_or_insert_with(|| {
19951                    fidl::new_empty!(fidl::Rights, fidl::encoding::DefaultFuchsiaResourceDialect)
19952                });
19953                fidl::decode!(
19954                    fidl::Rights,
19955                    fidl::encoding::DefaultFuchsiaResourceDialect,
19956                    val_ref,
19957                    decoder,
19958                    inner_offset,
19959                    inner_depth
19960                )?;
19961                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
19962                {
19963                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
19964                }
19965                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
19966                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
19967                }
19968            }
19969
19970            next_offset += envelope_size;
19971
19972            // Decode the remaining unknown envelopes.
19973            while next_offset < end_offset {
19974                _next_ordinal_to_read += 1;
19975                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
19976                next_offset += envelope_size;
19977            }
19978
19979            Ok(())
19980        }
19981    }
19982
19983    impl BufferCollectionTokenGroupCreateChildrenSyncResponse {
19984        #[inline(always)]
19985        fn max_ordinal_present(&self) -> u64 {
19986            if let Some(_) = self.tokens {
19987                return 1;
19988            }
19989            0
19990        }
19991    }
19992
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
        // Resource types are borrowed mutably for encoding; the encoder uses
        // the `&mut` to move contained handles out of the value (see
        // `take_or_borrow` uses in the matching `Encode` impl).
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // No conversion needed: the mutable reference itself is the
            // borrowed encoding form.
            value
        }
    }
20001
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenGroupCreateChildrenSyncResponse {
        type Owned = Self;

        // Inline representation of a FIDL table is its 16-byte vector header
        // (count + presence pointer), 8-byte aligned; see the "Vector header"
        // writes in the `Encode` impl below. These constants are part of the
        // wire format and must not change.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
20015
    // Encodes the table into the FIDL wire format: a 16-byte vector header
    // inline, followed by an out-of-line run of 8-byte envelopes, one per
    // ordinal up to the highest field that is present.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenGroupCreateChildrenSyncResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenGroupCreateChildrenSyncResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder
                .debug_check_bounds::<BufferCollectionTokenGroupCreateChildrenSyncResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1 (`tokens`): a bounded (max 64) vector of client ends.
            // `take_or_borrow` lets the encoder move the handles; `None` is
            // handled by `encode_in_envelope_optional`.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.tokens.as_mut().map(
                    <fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20089
    // Decodes the table wire format written by the matching `Encode` impl:
    // read the vector header, then walk the envelope run in ordinal order,
    // treating ordinals this binding does not know as unknown envelopes.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenGroupCreateChildrenSyncResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the number of envelopes, i.e. the highest encoded ordinal.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1 (`tokens`). Snapshot out-of-line and handle cursors so
            // the envelope's declared byte/handle counts can be validated after
            // decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.tokens.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Vector<
                            fidl::encoding::Endpoint<
                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                            >,
                            64,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope consumed exactly the bytes and handles it
                // declared in its header.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20206
20207    impl BufferCollectionTokenDuplicateSyncResponse {
20208        #[inline(always)]
20209        fn max_ordinal_present(&self) -> u64 {
20210            if let Some(_) = self.tokens {
20211                return 1;
20212            }
20213            0
20214        }
20215    }
20216
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionTokenDuplicateSyncResponse {
        // Resource types are borrowed mutably for encoding; the encoder uses
        // the `&mut` to move contained handles out of the value (see
        // `take_or_borrow` uses in the matching `Encode` impl).
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // No conversion needed: the mutable reference itself is the
            // borrowed encoding form.
            value
        }
    }
20225
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionTokenDuplicateSyncResponse {
        type Owned = Self;

        // Inline representation of a FIDL table is its 16-byte vector header
        // (count + presence pointer), 8-byte aligned; see the "Vector header"
        // writes in the `Encode` impl below. These constants are part of the
        // wire format and must not change.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
20239
    // Encodes the table into the FIDL wire format: a 16-byte vector header
    // inline, followed by an out-of-line run of 8-byte envelopes, one per
    // ordinal up to the highest field that is present.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionTokenDuplicateSyncResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionTokenDuplicateSyncResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<BufferCollectionTokenDuplicateSyncResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1 (`tokens`): a bounded (max 64) vector of client ends.
            // `take_or_borrow` lets the encoder move the handles; `None` is
            // handled by `encode_in_envelope_optional`.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.tokens.as_mut().map(
                    <fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20312
    // Decodes the table wire format written by the matching `Encode` impl:
    // read the vector header, then walk the envelope run in ordinal order,
    // treating ordinals this binding does not know as unknown envelopes.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionTokenDuplicateSyncResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the number of envelopes, i.e. the highest encoded ordinal.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1 (`tokens`). Snapshot out-of-line and handle cursors so
            // the envelope's declared byte/handle counts can be validated after
            // decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::Vector<
                    fidl::encoding::Endpoint<
                        fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                    >,
                    64,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Values of <= 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.tokens.get_or_insert_with(|| {
                    fidl::new_empty!(
                        fidl::encoding::Vector<
                            fidl::encoding::Endpoint<
                                fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                            >,
                            64,
                        >,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    fidl::encoding::Vector<
                        fidl::encoding::Endpoint<
                            fidl::endpoints::ClientEnd<BufferCollectionTokenMarker>,
                        >,
                        64,
                    >,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope consumed exactly the bytes and handles it
                // declared in its header.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20429
20430    impl BufferCollectionWaitForAllBuffersAllocatedResponse {
20431        #[inline(always)]
20432        fn max_ordinal_present(&self) -> u64 {
20433            if let Some(_) = self.buffer_collection_info {
20434                return 1;
20435            }
20436            0
20437        }
20438    }
20439
    impl fidl::encoding::ResourceTypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
        // Resource types are borrowed mutably for encoding; the encoder uses
        // the `&mut` to move contained handles out of the value (see
        // `take_or_borrow` uses in the matching `Encode` impl).
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // No conversion needed: the mutable reference itself is the
            // borrowed encoding form.
            value
        }
    }
20448
    unsafe impl fidl::encoding::TypeMarker for BufferCollectionWaitForAllBuffersAllocatedResponse {
        type Owned = Self;

        // Inline representation of a FIDL table is its 16-byte vector header
        // (count + presence pointer), 8-byte aligned; see the "Vector header"
        // writes in the `Encode` impl below. These constants are part of the
        // wire format and must not change.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
20462
    // Encodes the table into the FIDL wire format: a 16-byte vector header
    // inline, followed by an out-of-line run of 8-byte envelopes, one per
    // ordinal up to the highest field that is present.
    unsafe impl
        fidl::encoding::Encode<
            BufferCollectionWaitForAllBuffersAllocatedResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut BufferCollectionWaitForAllBuffersAllocatedResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder
                .debug_check_bounds::<BufferCollectionWaitForAllBuffersAllocatedResponse>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1 (`buffer_collection_info`): `take_or_borrow` lets the
            // encoder move contained handles; `None` is handled by
            // `encode_in_envelope_optional`.
            fidl::encoding::encode_in_envelope_optional::<
                BufferCollectionInfo,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.buffer_collection_info.as_mut().map(
                    <BufferCollectionInfo as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20526
    // Decodes the table wire format written by the matching `Encode` impl:
    // read the vector header, then walk the envelope run in ordinal order,
    // treating ordinals this binding does not know as unknown envelopes.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for BufferCollectionWaitForAllBuffersAllocatedResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // `len` is the number of envelopes, i.e. the highest encoded ordinal.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1 (`buffer_collection_info`). Snapshot out-of-line and
            // handle cursors so the envelope's declared byte/handle counts can
            // be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <BufferCollectionInfo as fidl::encoding::TypeMarker>::inline_size(
                        decoder.context,
                    );
                // Values of <= 4 bytes are stored inline in the envelope.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.buffer_collection_info.get_or_insert_with(|| {
                    fidl::new_empty!(
                        BufferCollectionInfo,
                        fidl::encoding::DefaultFuchsiaResourceDialect
                    )
                });
                fidl::decode!(
                    BufferCollectionInfo,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Verify the envelope consumed exactly the bytes and handles it
                // declared in its header.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20629
20630    impl NodeAttachNodeTrackingRequest {
20631        #[inline(always)]
20632        fn max_ordinal_present(&self) -> u64 {
20633            if let Some(_) = self.server_end {
20634                return 1;
20635            }
20636            0
20637        }
20638    }
20639
    // Resource types encode from `&mut Self` so that contained handles can be
    // moved out of the value during encoding rather than duplicated.
    impl fidl::encoding::ResourceTypeMarker for NodeAttachNodeTrackingRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // The mutable reference itself is the borrowed form; no
            // conversion is required.
            value
        }
    }
20648
    // SAFETY: the size/alignment constants below are emitted by fidlgen to
    // match the wire format of this table and must not be altered by hand.
    unsafe impl fidl::encoding::TypeMarker for NodeAttachNodeTrackingRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned (their inline part is two u64 words).
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // Inline part is the 16-byte table header (max ordinal +
            // presence marker); the envelopes live out of line.
            16
        }
    }
20662
    // Encodes `NodeAttachNodeTrackingRequest` as a FIDL table: a 16-byte
    // inline header followed by an out-of-line array of 8-byte envelopes,
    // one per ordinal up to the highest ordinal that is present. The
    // `server_end` eventpair handle (ordinal 1) is moved out of `self`.
    unsafe impl
        fidl::encoding::Encode<
            NodeAttachNodeTrackingRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeAttachNodeTrackingRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeAttachNodeTrackingRequest>(offset);
            // Vector header
            // Tables reuse the vector wire header: element count (the highest
            // present ordinal) plus a mandatory allocation-present marker.
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `server_end`, an eventpair handle. The rights
            // constant 2147483648 (0x8000_0000) is emitted by fidlgen.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.server_end.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20733
    // Decodes `NodeAttachNodeTrackingRequest` in place, validating every
    // envelope header (inline bit, byte count, handle count) and skipping
    // unknown ordinals for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeAttachNodeTrackingRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables share the vector wire header: `len` is the highest
            // ordinal transmitted; an absent body is invalid for a table.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot positions so the envelope's claimed byte/handle counts
            // can be verified after decoding the member.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Members of 4 bytes or fewer are stored inline in the
                // envelope; the inline bit must agree with the member size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.server_end.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Out-of-line members must consume exactly the byte count the
                // envelope header claimed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                // All handles the envelope claimed must have been consumed.
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
20828
20829    impl NodeIsAlternateForRequest {
20830        #[inline(always)]
20831        fn max_ordinal_present(&self) -> u64 {
20832            if let Some(_) = self.node_ref {
20833                return 1;
20834            }
20835            0
20836        }
20837    }
20838
    // Resource types encode from `&mut Self` so that contained handles can be
    // moved out of the value during encoding rather than duplicated.
    impl fidl::encoding::ResourceTypeMarker for NodeIsAlternateForRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // The mutable reference itself is the borrowed form.
            value
        }
    }
20847
    // SAFETY: size/alignment constants are generated by fidlgen to match the
    // wire format of this table; do not edit by hand.
    unsafe impl fidl::encoding::TypeMarker for NodeIsAlternateForRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned (two u64 header words).
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // 16-byte table header; envelopes are out of line.
            16
        }
    }
20861
    // Encodes `NodeIsAlternateForRequest` as a FIDL table: 16-byte header
    // plus one out-of-line envelope per present ordinal. The `node_ref`
    // event handle (ordinal 1) is moved out of `self`.
    unsafe impl
        fidl::encoding::Encode<
            NodeIsAlternateForRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeIsAlternateForRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeIsAlternateForRequest>(offset);
            // Vector header
            // Tables reuse the vector wire header: highest present ordinal
            // followed by the mandatory allocation-present marker.
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `node_ref`, a zx event handle; rights constant
            // 2147483648 (0x8000_0000) is emitted by fidlgen.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.node_ref.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Event,
                        { fidl::ObjectType::EVENT.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
20932
    // Decodes `NodeIsAlternateForRequest` in place, validating each envelope
    // header and skipping unknown ordinals for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeIsAlternateForRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables share the vector wire header; `len` is the highest
            // ordinal transmitted, and an absent body is invalid.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot positions so the envelope's claimed byte/handle counts
            // can be verified after decoding the member.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // The inline bit must agree with the member size (<= 4 bytes
                // means the value is stored inside the envelope itself).
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Out-of-line members must consume exactly the claimed bytes.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                // All handles the envelope claimed must have been consumed.
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21027
21028    impl NodeSetWeakOkRequest {
21029        #[inline(always)]
21030        fn max_ordinal_present(&self) -> u64 {
21031            if let Some(_) = self.for_child_nodes_also {
21032                return 1;
21033            }
21034            0
21035        }
21036    }
21037
    // Resource types encode from `&mut Self`; for this table the borrow is
    // the identity (no handles need to be detached up front).
    impl fidl::encoding::ResourceTypeMarker for NodeSetWeakOkRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // The mutable reference itself is the borrowed form.
            value
        }
    }
21046
    // SAFETY: size/alignment constants are generated by fidlgen to match the
    // wire format of this table; do not edit by hand.
    unsafe impl fidl::encoding::TypeMarker for NodeSetWeakOkRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned (two u64 header words).
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // 16-byte table header; envelopes are out of line.
            16
        }
    }
21060
    // Encodes `NodeSetWeakOkRequest` as a FIDL table: 16-byte header plus one
    // out-of-line envelope per present ordinal. The only member,
    // `for_child_nodes_also` (ordinal 1), is a plain bool value type, so it
    // is borrowed rather than moved.
    unsafe impl
        fidl::encoding::Encode<NodeSetWeakOkRequest, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut NodeSetWeakOkRequest
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeSetWeakOkRequest>(offset);
            // Vector header
            // Tables reuse the vector wire header: highest present ordinal
            // followed by the mandatory allocation-present marker.
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            fidl::encoding::encode_in_envelope_optional::<
                bool,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.for_child_nodes_also
                    .as_ref()
                    .map(<bool as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21121
    // Decodes `NodeSetWeakOkRequest` in place, validating each envelope
    // header and skipping unknown ordinals for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeSetWeakOkRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Tables share the vector wire header; `len` is the highest
            // ordinal transmitted, and an absent body is invalid.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Snapshot positions so the envelope's claimed byte/handle counts
            // can be verified after decoding the member.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <bool as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                // bool is 1 byte, so it is always stored inline; the inline
                // bit in the envelope must say so.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.for_child_nodes_also.get_or_insert_with(|| {
                    fidl::new_empty!(bool, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    bool,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                // Out-of-line members must consume exactly the claimed bytes.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                // All handles the envelope claimed must have been consumed.
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21219
21220    impl NodeGetNodeRefResponse {
21221        #[inline(always)]
21222        fn max_ordinal_present(&self) -> u64 {
21223            if let Some(_) = self.node_ref {
21224                return 1;
21225            }
21226            0
21227        }
21228    }
21229
    // Resource types encode from `&mut Self` so that contained handles can be
    // moved out of the value during encoding rather than duplicated.
    impl fidl::encoding::ResourceTypeMarker for NodeGetNodeRefResponse {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            // The mutable reference itself is the borrowed form.
            value
        }
    }
21238
    // SAFETY: size/alignment constants are generated by fidlgen to match the
    // wire format of this table; do not edit by hand.
    unsafe impl fidl::encoding::TypeMarker for NodeGetNodeRefResponse {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            // Tables are 8-byte aligned (two u64 header words).
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            // 16-byte table header; envelopes are out of line.
            16
        }
    }
21252
    // Encodes `NodeGetNodeRefResponse` as a FIDL table: 16-byte header plus
    // one out-of-line envelope per present ordinal. The `node_ref` event
    // handle (ordinal 1) is moved out of `self`.
    unsafe impl
        fidl::encoding::Encode<
            NodeGetNodeRefResponse,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut NodeGetNodeRefResponse
    {
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<NodeGetNodeRefResponse>(offset);
            // Vector header
            // Tables reuse the vector wire header: highest present ordinal
            // followed by the mandatory allocation-present marker.
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `node_ref`, a zx event handle; rights constant
            // 2147483648 (0x8000_0000) is emitted by fidlgen.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.node_ref.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Event,
                        { fidl::ObjectType::EVENT.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21323
    // Decodes a `NodeGetNodeRefResponse` FIDL table. Tables are encoded as a
    // vector-style header (max ordinal count + presence marker) followed by
    // one 8-byte envelope per ordinal; ordinal 1 carries `node_ref`, a
    // zx.Event handle.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for NodeGetNodeRefResponse
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Safety: caller (the fidl decode machinery) guarantees `offset` is
        // within the decoder's bounds; `debug_check_bounds` re-verifies in
        // debug builds.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's envelope vector must be present; an absent vector is a
            // wire-format violation.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            // The envelope vector is out-of-line content: one recursion level.
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            // Sender stopped before ordinal 1: no fields were transmitted.
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `node_ref` (zx.Event handle).
            // Snapshot out-of-line/handle positions so the envelope's declared
            // num_bytes/num_handles can be validated after decoding.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                // NOTE(review): 2147483648 is 0x8000_0000, presumably the
                // ZX_RIGHT_SAME_RIGHTS handle-rights constant — confirm.
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Event,
                    { fidl::ObjectType::EVENT.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Payloads of <= 4 bytes are stored inline in the envelope;
                // the header's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place, lazily initializing the field with an
                // empty (invalid) handle if it was not already set.
                let val_ref =
                self.node_ref.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Event, { fidl::ObjectType::EVENT.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope header's byte/handle counts match what
                // decoding actually consumed.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21418
21419    impl VmoBuffer {
21420        #[inline(always)]
21421        fn max_ordinal_present(&self) -> u64 {
21422            if let Some(_) = self.close_weak_asap {
21423                return 3;
21424            }
21425            if let Some(_) = self.vmo_usable_start {
21426                return 2;
21427            }
21428            if let Some(_) = self.vmo {
21429                return 1;
21430            }
21431            0
21432        }
21433    }
21434
21435    impl fidl::encoding::ResourceTypeMarker for VmoBuffer {
21436        type Borrowed<'a> = &'a mut Self;
21437        fn take_or_borrow<'a>(
21438            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
21439        ) -> Self::Borrowed<'a> {
21440            value
21441        }
21442    }
21443
    // Wire-format layout constants for the `VmoBuffer` table.
    // Safety: the returned alignment and size must match the FIDL wire format
    // exactly — the encode/decode impls below rely on them. The 16 inline
    // bytes appear to be the table header (u64 max-ordinal count + u64
    // presence marker) that `encode` writes at `offset` and `offset + 8`.
    unsafe impl fidl::encoding::TypeMarker for VmoBuffer {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            8
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            16
        }
    }
21457
    // Encodes a `VmoBuffer` FIDL table: a vector-style header followed by one
    // 8-byte envelope per ordinal up to the highest present field. Ordinal
    // 1 = `vmo` (zx.Vmo handle), 2 = `vmo_usable_start` (u64),
    // 3 = `close_weak_asap` (zx.EventPair handle). Encoding takes `&mut self`
    // so handle fields can be moved out (`take_or_borrow`).
    unsafe impl fidl::encoding::Encode<VmoBuffer, fidl::encoding::DefaultFuchsiaResourceDialect>
        for &mut VmoBuffer
    {
        // Safety: caller guarantees `offset` points at 16 writable,
        // correctly-aligned bytes inside the encoder buffer
        // (`debug_check_bounds` re-verifies in debug builds).
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<VmoBuffer>(offset);
            // Vector header
            let max_ordinal: u64 = self.max_ordinal_present();
            encoder.write_num(max_ordinal, offset);
            encoder.write_num(fidl::encoding::ALLOC_PRESENT_U64, offset + 8);
            // Calling encoder.out_of_line_offset(0) is not allowed.
            if max_ordinal == 0 {
                return Ok(());
            }
            // The envelope vector is out-of-line content: one recursion level.
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = max_ordinal as usize * envelope_size;
            #[allow(unused_variables)]
            let offset = encoder.out_of_line_offset(bytes_len);
            let mut _prev_end_offset: usize = 0;
            if 1 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (1 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 1: `vmo`. NOTE(review): 2147483648 is 0x8000_0000,
            // presumably ZX_RIGHT_SAME_RIGHTS — confirm.
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::Vmo,
                        { fidl::ObjectType::VMO.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 2 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (2 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 2: `vmo_usable_start` — a plain value type, so it is
            // borrowed rather than taken.
            fidl::encoding::encode_in_envelope_optional::<
                u64,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.vmo_usable_start
                    .as_ref()
                    .map(<u64 as fidl::encoding::ValueTypeMarker>::borrow),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;
            if 3 > max_ordinal {
                return Ok(());
            }

            // Write at offset+(ordinal-1)*envelope_size, since ordinals are one-based and envelopes
            // are envelope_size bytes.
            let cur_offset: usize = (3 - 1) * envelope_size;

            // Zero reserved fields.
            encoder.padding(offset + _prev_end_offset, cur_offset - _prev_end_offset);

            // Safety:
            // - bytes_len is calculated to fit envelope_size*max(member.ordinal).
            // - Since cur_offset is envelope_size*(member.ordinal - 1) and the envelope takes
            //   envelope_size bytes, there is always sufficient room.
            // Ordinal 3: `close_weak_asap` (zx.EventPair handle).
            fidl::encoding::encode_in_envelope_optional::<
                fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                >,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >(
                self.close_weak_asap.as_mut().map(
                    <fidl::encoding::HandleType<
                        fidl::EventPair,
                        { fidl::ObjectType::EVENTPAIR.into_raw() },
                        2147483648,
                    > as fidl::encoding::ResourceTypeMarker>::take_or_borrow,
                ),
                encoder,
                offset + cur_offset,
                depth,
            )?;

            _prev_end_offset = cur_offset + envelope_size;

            Ok(())
        }
    }
21589
    // Decodes a `VmoBuffer` FIDL table. Mirrors the encode impl above:
    // ordinal 1 = `vmo` (zx.Vmo handle), 2 = `vmo_usable_start` (u64),
    // 3 = `close_weak_asap` (zx.EventPair handle). Unknown ordinals (gaps and
    // trailing envelopes) are skipped for forward compatibility.
    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect> for VmoBuffer {
        #[inline(always)]
        fn new_empty() -> Self {
            Self::default()
        }

        // Safety: caller (the fidl decode machinery) guarantees `offset` is
        // within the decoder's bounds; `debug_check_bounds` re-verifies in
        // debug builds.
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            mut depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // A table's envelope vector must be present on the wire.
            let len = match fidl::encoding::decode_vector_header(decoder, offset)? {
                None => return Err(fidl::Error::NotNullable),
                Some(len) => len,
            };
            // Calling decoder.out_of_line_offset(0) is not allowed.
            if len == 0 {
                return Ok(());
            };
            depth.increment()?;
            let envelope_size = 8;
            let bytes_len = len * envelope_size;
            let offset = decoder.out_of_line_offset(bytes_len)?;
            // Decode the envelope for each type.
            let mut _next_ordinal_to_read = 0;
            let mut next_offset = offset;
            let end_offset = offset + bytes_len;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 1 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 1: `vmo`. Snapshot out-of-line/handle positions so the
            // envelope's declared counts can be validated afterwards.
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                // NOTE(review): 2147483648 is 0x8000_0000, presumably the
                // ZX_RIGHT_SAME_RIGHTS handle-rights constant — confirm.
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::Vmo,
                    { fidl::ObjectType::VMO.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                // Payloads of <= 4 bytes are stored inline in the envelope;
                // the header's inline bit must agree with the member's size.
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                // Decode in place, lazily initializing the field if absent.
                let val_ref =
                self.vmo.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::Vmo, { fidl::ObjectType::VMO.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                // Verify the envelope's byte/handle counts match consumption.
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 2 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 2: `vmo_usable_start` (u64).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size =
                    <u64 as fidl::encoding::TypeMarker>::inline_size(decoder.context);
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref = self.vmo_usable_start.get_or_insert_with(|| {
                    fidl::new_empty!(u64, fidl::encoding::DefaultFuchsiaResourceDialect)
                });
                fidl::decode!(
                    u64,
                    fidl::encoding::DefaultFuchsiaResourceDialect,
                    val_ref,
                    decoder,
                    inner_offset,
                    inner_depth
                )?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;
            _next_ordinal_to_read += 1;
            if next_offset >= end_offset {
                return Ok(());
            }

            // Decode unknown envelopes for gaps in ordinals.
            while _next_ordinal_to_read < 3 {
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                _next_ordinal_to_read += 1;
                next_offset += envelope_size;
            }

            // Ordinal 3: `close_weak_asap` (zx.EventPair handle).
            let next_out_of_line = decoder.next_out_of_line();
            let handles_before = decoder.remaining_handles();
            if let Some((inlined, num_bytes, num_handles)) =
                fidl::encoding::decode_envelope_header(decoder, next_offset)?
            {
                let member_inline_size = <fidl::encoding::HandleType<
                    fidl::EventPair,
                    { fidl::ObjectType::EVENTPAIR.into_raw() },
                    2147483648,
                > as fidl::encoding::TypeMarker>::inline_size(
                    decoder.context
                );
                if inlined != (member_inline_size <= 4) {
                    return Err(fidl::Error::InvalidInlineBitInEnvelope);
                }
                let inner_offset;
                let mut inner_depth = depth.clone();
                if inlined {
                    decoder.check_inline_envelope_padding(next_offset, member_inline_size)?;
                    inner_offset = next_offset;
                } else {
                    inner_offset = decoder.out_of_line_offset(member_inline_size)?;
                    inner_depth.increment()?;
                }
                let val_ref =
                self.close_weak_asap.get_or_insert_with(|| fidl::new_empty!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect));
                fidl::decode!(fidl::encoding::HandleType<fidl::EventPair, { fidl::ObjectType::EVENTPAIR.into_raw() }, 2147483648>, fidl::encoding::DefaultFuchsiaResourceDialect, val_ref, decoder, inner_offset, inner_depth)?;
                if !inlined && decoder.next_out_of_line() != next_out_of_line + (num_bytes as usize)
                {
                    return Err(fidl::Error::InvalidNumBytesInEnvelope);
                }
                if handles_before != decoder.remaining_handles() + (num_handles as usize) {
                    return Err(fidl::Error::InvalidNumHandlesInEnvelope);
                }
            }

            next_offset += envelope_size;

            // Decode the remaining unknown envelopes.
            while next_offset < end_offset {
                _next_ordinal_to_read += 1;
                fidl::encoding::decode_unknown_envelope(decoder, next_offset, depth)?;
                next_offset += envelope_size;
            }

            Ok(())
        }
    }
21783}