
// WARNING: This file is machine generated by fidlgen.

#![warn(clippy::all)]
#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]

use bitflags::bitflags;
use fidl::client::QueryResponseFut;
use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
use fidl::endpoints::{ControlHandle as _, Responder as _};
pub use fidl_fuchsia_memorypressure__common::*;
use futures::future::{self, MaybeDone, TryFutureExt};
use zx_status;

#[derive(Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ProviderRegisterWatcherRequest {
    pub watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
}

impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for ProviderRegisterWatcherRequest
{
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ProviderMarker;

impl fidl::endpoints::ProtocolMarker for ProviderMarker {
    type Proxy = ProviderProxy;
    type RequestStream = ProviderRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = ProviderSynchronousProxy;

    const DEBUG_NAME: &'static str = "fuchsia.memorypressure.Provider";
}
impl fidl::endpoints::DiscoverableProtocolMarker for ProviderMarker {}

pub trait ProviderProxyInterface: Send + Sync {
    fn r#register_watcher(
        &self,
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error>;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct ProviderSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for ProviderSynchronousProxy {
    type Proxy = ProviderProxy;
    type Protocol = ProviderMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}

#[cfg(target_os = "fuchsia")]
impl ProviderSynchronousProxy {
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<ProviderEvent, fidl::Error> {
        ProviderEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of the child jobs in its tree.
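    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the generated bindings) of registering a
    /// watcher over the synchronous proxy; `connect_to_protocol_sync` from
    /// `fuchsia_component` and the error handling are illustrative assumptions.
    ///
    /// ```ignore
    /// let provider =
    ///     fuchsia_component::client::connect_to_protocol_sync::<ProviderMarker>()?;
    /// // Keep the server end: Watcher requests must be served from it.
    /// let (watcher_client, _watcher_server) =
    ///     fidl::endpoints::create_endpoints::<WatcherMarker>();
    /// provider.register_watcher(watcher_client)?;
    /// ```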
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}

#[cfg(target_os = "fuchsia")]
impl From<ProviderSynchronousProxy> for zx::NullableHandle {
    fn from(value: ProviderSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for ProviderSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for ProviderSynchronousProxy {
    type Protocol = ProviderMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<ProviderMarker>) -> Self {
        Self::new(value.into_channel())
    }
}

#[derive(Debug, Clone)]
pub struct ProviderProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for ProviderProxy {
    type Protocol = ProviderMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}

impl ProviderProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Provider.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> ProviderEventStream {
        ProviderEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of the child jobs in its tree.
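    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the generated bindings) of registering a
    /// watcher from an async client; `connect_to_protocol` from
    /// `fuchsia_component` is an illustrative assumption.
    ///
    /// ```ignore
    /// let provider = fuchsia_component::client::connect_to_protocol::<ProviderMarker>()?;
    /// let (watcher_client, watcher_stream) =
    ///     fidl::endpoints::create_request_stream::<WatcherMarker>();
    /// provider.register_watcher(watcher_client)?;
    /// // Serve `OnLevelChanged` requests from `watcher_stream` (a WatcherRequestStream).
    /// ```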
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        ProviderProxyInterface::r#register_watcher(self, watcher)
    }
}

impl ProviderProxyInterface for ProviderProxy {
    fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}

pub struct ProviderEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for ProviderEventStream {}

impl futures::stream::FusedStream for ProviderEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for ProviderEventStream {
    type Item = Result<ProviderEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(ProviderEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}

#[derive(Debug)]
pub enum ProviderEvent {}

impl ProviderEvent {
    /// Decodes a message buffer as a [`ProviderEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<ProviderEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}

/// A Stream of incoming requests for fuchsia.memorypressure/Provider.
pub struct ProviderRequestStream {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for ProviderRequestStream {}

impl futures::stream::FusedStream for ProviderRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for ProviderRequestStream {
    type Protocol = ProviderMarker;
    type ControlHandle = ProviderControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    fn control_handle(&self) -> Self::ControlHandle {
        ProviderControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}

impl futures::Stream for ProviderRequestStream {
    type Item = Result<ProviderRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled ProviderRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    0x91e65af25aae4a9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            ProviderRegisterWatcherRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<ProviderRegisterWatcherRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = ProviderControlHandle { inner: this.inner.clone() };
                        Ok(ProviderRequest::RegisterWatcher {
                            watcher: req.watcher,

                            control_handle,
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}

/// Registration protocol
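///
/// # Example
///
/// A minimal sketch (not part of the generated bindings) of a provider's server
/// loop; the `watchers` vector stands in for wherever a real provider would
/// store registered clients, and is an illustrative assumption.
///
/// ```ignore
/// use futures::TryStreamExt;
///
/// async fn serve(mut stream: ProviderRequestStream) -> Result<(), fidl::Error> {
///     let mut watchers = Vec::new();
///     while let Some(request) = stream.try_next().await? {
///         match request {
///             ProviderRequest::RegisterWatcher { watcher, control_handle: _ } => {
///                 // A real provider sends the current level to `watcher` right away.
///                 watchers.push(watcher);
///             }
///         }
///     }
///     Ok(())
/// }
/// ```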
#[derive(Debug)]
pub enum ProviderRequest {
    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of the child jobs in its tree.
    RegisterWatcher {
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
        control_handle: ProviderControlHandle,
    },
}

impl ProviderRequest {
    #[allow(irrefutable_let_patterns)]
    pub fn into_register_watcher(
        self,
    ) -> Option<(fidl::endpoints::ClientEnd<WatcherMarker>, ProviderControlHandle)> {
        if let ProviderRequest::RegisterWatcher { watcher, control_handle } = self {
            Some((watcher, control_handle))
        } else {
            None
        }
    }

    /// Name of the method defined in FIDL
    pub fn method_name(&self) -> &'static str {
        match *self {
            ProviderRequest::RegisterWatcher { .. } => "register_watcher",
        }
    }
}

#[derive(Debug, Clone)]
pub struct ProviderControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for ProviderControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

impl ProviderControlHandle {}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct WatcherMarker;

impl fidl::endpoints::ProtocolMarker for WatcherMarker {
    type Proxy = WatcherProxy;
    type RequestStream = WatcherRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = WatcherSynchronousProxy;

    const DEBUG_NAME: &'static str = "(anonymous) Watcher";
}

pub trait WatcherProxyInterface: Send + Sync {
    type OnLevelChangedResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#on_level_changed(&self, level: Level) -> Self::OnLevelChangedResponseFut;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct WatcherSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for WatcherSynchronousProxy {
    type Proxy = WatcherProxy;
    type Protocol = WatcherMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}

#[cfg(target_os = "fuchsia")]
impl WatcherSynchronousProxy {
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<WatcherEvent, fidl::Error> {
        WatcherEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
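    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the generated bindings): a provider pushes a
    /// level change over the synchronous proxy and blocks until the watcher
    /// acknowledges. The infinite deadline is an illustrative choice.
    ///
    /// ```ignore
    /// let watcher = WatcherSynchronousProxy::new(channel);
    /// watcher.on_level_changed(Level::Warning, zx::MonotonicInstant::INFINITE)?;
    /// ```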
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<(), fidl::Error> {
        let _response =
            self.client.send_query::<WatcherOnLevelChangedRequest, fidl::encoding::EmptyPayload>(
                (level,),
                0x55d559533407fed9,
                fidl::encoding::DynamicFlags::empty(),
                ___deadline,
            )?;
        Ok(_response)
    }
}

#[cfg(target_os = "fuchsia")]
impl From<WatcherSynchronousProxy> for zx::NullableHandle {
    fn from(value: WatcherSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for WatcherSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for WatcherSynchronousProxy {
    type Protocol = WatcherMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<WatcherMarker>) -> Self {
        Self::new(value.into_channel())
    }
}

#[derive(Debug, Clone)]
pub struct WatcherProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for WatcherProxy {
    type Protocol = WatcherMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}

impl WatcherProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Watcher.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> WatcherEventStream {
        WatcherEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
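    ///
    /// # Example
    ///
    /// A minimal sketch (not part of the generated bindings): awaiting the returned
    /// future blocks the provider's notification pipeline until the watcher
    /// acknowledges, which is what guideline 1 above relies on.
    ///
    /// ```ignore
    /// watcher_proxy.on_level_changed(Level::Critical).await?;
    /// // Only after the acknowledgement should the next transition be reported.
    /// ```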
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        WatcherProxyInterface::r#on_level_changed(self, level)
    }
}

impl WatcherProxyInterface for WatcherProxy {
    type OnLevelChangedResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#on_level_changed(&self, mut level: Level) -> Self::OnLevelChangedResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::EmptyPayload,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x55d559533407fed9,
            >(_buf?)?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<WatcherOnLevelChangedRequest, ()>(
            (level,),
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
            _decode,
        )
    }
}

pub struct WatcherEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for WatcherEventStream {}

impl futures::stream::FusedStream for WatcherEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for WatcherEventStream {
    type Item = Result<WatcherEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(WatcherEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}

#[derive(Debug)]
pub enum WatcherEvent {}

impl WatcherEvent {
    /// Decodes a message buffer as a [`WatcherEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<WatcherEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}

/// A Stream of incoming requests for fuchsia.memorypressure/Watcher.
pub struct WatcherRequestStream {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for WatcherRequestStream {}

impl futures::stream::FusedStream for WatcherRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for WatcherRequestStream {
    type Protocol = WatcherMarker;
    type ControlHandle = WatcherControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    fn control_handle(&self) -> Self::ControlHandle {
        WatcherControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}

impl futures::Stream for WatcherRequestStream {
    type Item = Result<WatcherRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled WatcherRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    0x55d559533407fed9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            WatcherOnLevelChangedRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<WatcherOnLevelChangedRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = WatcherControlHandle { inner: this.inner.clone() };
                        Ok(WatcherRequest::OnLevelChanged {
                            level: req.level,

                            responder: WatcherOnLevelChangedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}

/// Watcher protocol
/// To be implemented by clients who wish to be notified of memory pressure level changes.
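///
/// # Example
///
/// A minimal sketch (not part of the generated bindings) of a client serving its
/// `Watcher` channel: it records the level, acknowledges immediately via the
/// responder, and defers any actual reclamation work.
///
/// ```ignore
/// use futures::TryStreamExt;
///
/// async fn watch(mut stream: WatcherRequestStream) -> Result<(), fidl::Error> {
///     while let Some(request) = stream.try_next().await? {
///         match request {
///             WatcherRequest::OnLevelChanged { level, responder } => {
///                 // Level changes are edge-triggered: remember the latest level.
///                 let _current = level;
///                 // Reply first; reclaim memory asynchronously afterwards.
///                 responder.send()?;
///             }
///         }
///     }
///     Ok(())
/// }
/// ```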
#[derive(Debug)]
pub enum WatcherRequest {
    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    OnLevelChanged { level: Level, responder: WatcherOnLevelChangedResponder },
}

impl WatcherRequest {
    #[allow(irrefutable_let_patterns)]
    pub fn into_on_level_changed(self) -> Option<(Level, WatcherOnLevelChangedResponder)> {
        if let WatcherRequest::OnLevelChanged { level, responder } = self {
            Some((level, responder))
        } else {
            None
        }
    }

    /// Name of the method defined in FIDL
    pub fn method_name(&self) -> &'static str {
        match *self {
            WatcherRequest::OnLevelChanged { .. } => "on_level_changed",
        }
    }
}

#[derive(Debug, Clone)]
pub struct WatcherControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for WatcherControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

impl WatcherControlHandle {}

#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct WatcherOnLevelChangedResponder {
    control_handle: std::mem::ManuallyDrop<WatcherControlHandle>,
    tx_id: u32,
}

/// Sets the channel to be shut down (see [`WatcherControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for WatcherOnLevelChangedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for WatcherOnLevelChangedResponder {
    type ControlHandle = WatcherControlHandle;

    fn control_handle(&self) -> &WatcherControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl WatcherOnLevelChangedResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Shuts down the channel if an error occurs.
    pub fn send(self) -> Result<(), fidl::Error> {
        let _result = self.send_raw();
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shut down the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
        let _result = self.send_raw();
        self.drop_without_shutdown();
        _result
    }

    fn send_raw(&self) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::EmptyPayload>(
            (),
            self.tx_id,
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}

mod internal {
    use super::*;

    impl fidl::encoding::ResourceTypeMarker for ProviderRegisterWatcherRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for ProviderRegisterWatcherRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            4
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            4
        }
    }

    unsafe impl
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut ProviderRegisterWatcherRequest
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Delegate to tuple encoding.
            fidl::encoding::Encode::<ProviderRegisterWatcherRequest, fidl::encoding::DefaultFuchsiaResourceDialect>::encode(
                (
                    <fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow(&mut self.watcher),
                ),
                encoder, offset, _depth
            )
        }
    }
    unsafe impl<
        T0: fidl::encoding::Encode<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
    >
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for (T0,)
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Zero out padding regions. There's no need to apply masks
            // because the unmasked parts will be overwritten by fields.
            // Write the fields.
            self.0.encode(encoder, offset + 0, depth)?;
            Ok(())
        }
    }

    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for ProviderRegisterWatcherRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self {
                watcher: fidl::new_empty!(
                    fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                ),
            }
        }

        #[inline]
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Verify that padding bytes are zero.
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                &mut self.watcher,
                decoder,
                offset + 0,
                _depth
            )?;
            Ok(())
        }
    }
}