fidl_fuchsia_memorypressure/
fidl_fuchsia_memorypressure.rs

1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_memorypressure__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request payload for `Provider.RegisterWatcher`.
///
/// Owns the client end of the `Watcher` channel being registered; it is not
/// `Clone` because it carries a channel endpoint.
#[derive(Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ProviderRegisterWatcherRequest {
    pub watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
}

// Marker impl: permits standalone encode/decode of this request using the
// default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for ProviderRegisterWatcherRequest
{
}
23
/// Marker type identifying the `fuchsia.memorypressure.Provider` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ProviderMarker;

impl fidl::endpoints::ProtocolMarker for ProviderMarker {
    type Proxy = ProviderProxy;
    type RequestStream = ProviderRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = ProviderSynchronousProxy;

    // Full protocol name; used in error messages and as the discovery name.
    const DEBUG_NAME: &'static str = "fuchsia.memorypressure.Provider";
}
// Discoverable: clients may connect to this protocol by its DEBUG_NAME.
impl fidl::endpoints::DiscoverableProtocolMarker for ProviderMarker {}
36
/// Client-side interface for `Provider`, implemented by the async proxy.
pub trait ProviderProxyInterface: Send + Sync {
    /// Sends the request; no response future is returned (the method returns
    /// `Result` directly rather than a future, i.e. it is one-way).
    fn r#register_watcher(
        &self,
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error>;
}
/// Synchronous (blocking) client proxy for `fuchsia.memorypressure/Provider`.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct ProviderSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for ProviderSynchronousProxy {
    type Proxy = ProviderProxy;
    type Protocol = ProviderMarker;

    fn from_channel(channel: fidl::Channel) -> Self {
        // Delegate to the inherent constructor, which tags the client with
        // the protocol's debug name.
        ProviderSynchronousProxy::new(channel)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
66
#[cfg(target_os = "fuchsia")]
impl ProviderSynchronousProxy {
    /// Creates a synchronous proxy over `channel`, recording the protocol's
    /// `DEBUG_NAME` for use in error reporting.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<ProviderEvent, fidl::Error> {
        ProviderEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // One-way call: encode and send with the method ordinal; no reply is
        // awaited.
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
111
#[cfg(target_os = "fuchsia")]
impl From<ProviderSynchronousProxy> for zx::Handle {
    /// Discards the proxy wrapper and yields the raw Zircon handle.
    fn from(proxy: ProviderSynchronousProxy) -> Self {
        let channel = proxy.into_channel();
        channel.into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for ProviderSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(channel: fidl::Channel) -> Self {
        ProviderSynchronousProxy::new(channel)
    }
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for ProviderSynchronousProxy {
    type Protocol = ProviderMarker;

    /// Converts a typed client end into a synchronous proxy.
    fn from_client(client: fidl::endpoints::ClientEnd<ProviderMarker>) -> Self {
        ProviderSynchronousProxy::new(client.into_channel())
    }
}
134
135#[derive(Debug, Clone)]
136pub struct ProviderProxy {
137    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
138}
139
140impl fidl::endpoints::Proxy for ProviderProxy {
141    type Protocol = ProviderMarker;
142
143    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
144        Self::new(inner)
145    }
146
147    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
148        self.client.into_channel().map_err(|client| Self { client })
149    }
150
151    fn as_channel(&self) -> &::fidl::AsyncChannel {
152        self.client.as_channel()
153    }
154}
155
impl ProviderProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Provider.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> ProviderEventStream {
        ProviderEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // Forward to the trait impl, which performs the actual channel write.
        ProviderProxyInterface::r#register_watcher(self, watcher)
    }
}
192
// Wire-level implementation of the client interface for the async proxy.
impl ProviderProxyInterface for ProviderProxy {
    fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // One-way call: encode and send with the method ordinal; no reply is
        // awaited.
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
205
/// Stream of events sent by the server end of `Provider`.
pub struct ProviderEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for ProviderEventStream {}

impl futures::stream::FusedStream for ProviderEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for ProviderEventStream {
    type Item = Result<ProviderEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Poll the underlying receiver; each raw buffer is decoded into a
        // typed event before being yielded.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(ProviderEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
234
/// Events for `Provider`. The protocol declares no events, so this enum is
/// empty and every received ordinal is an error.
#[derive(Debug)]
pub enum ProviderEvent {}

impl ProviderEvent {
    /// Decodes a message buffer as a [`ProviderEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<ProviderEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events carry no transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // No event ordinals exist for this protocol.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
254
/// A Stream of incoming requests for fuchsia.memorypressure/Provider.
pub struct ProviderRequestStream {
    // Shared server state; also referenced by every ProviderControlHandle.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for ProviderRequestStream {}

impl futures::stream::FusedStream for ProviderRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for ProviderRequestStream {
    type Protocol = ProviderMarker;
    type ControlHandle = ProviderControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    fn control_handle(&self) -> Self::ControlHandle {
        ProviderControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
295
impl futures::Stream for ProviderRequestStream {
    type Item = Result<ProviderRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A shutdown requested via a control handle ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled ProviderRequestStream after completion");
        }
        // Decode in thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure is the normal end-of-stream condition, not
                    // an error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Provider.RegisterWatcher (one-way, so tx_id must be 0).
                    0x91e65af25aae4a9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            ProviderRegisterWatcherRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<ProviderRegisterWatcherRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = ProviderControlHandle { inner: this.inner.clone() };
                        Ok(ProviderRequest::RegisterWatcher {
                            watcher: req.watcher,

                            control_handle,
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
355
/// Registration protocol
#[derive(Debug)]
pub enum ProviderRequest {
    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    RegisterWatcher {
        /// Client end of the watcher channel being registered.
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
        /// Handle for shutting down or signaling the serving connection.
        control_handle: ProviderControlHandle,
    },
}
377
378impl ProviderRequest {
379    #[allow(irrefutable_let_patterns)]
380    pub fn into_register_watcher(
381        self,
382    ) -> Option<(fidl::endpoints::ClientEnd<WatcherMarker>, ProviderControlHandle)> {
383        if let ProviderRequest::RegisterWatcher { watcher, control_handle } = self {
384            Some((watcher, control_handle))
385        } else {
386            None
387        }
388    }
389
390    /// Name of the method defined in FIDL
391    pub fn method_name(&self) -> &'static str {
392        match *self {
393            ProviderRequest::RegisterWatcher { .. } => "register_watcher",
394        }
395    }
396}
397
/// Server-side handle for the `Provider` connection; clones share the same
/// underlying channel state as the request stream.
#[derive(Debug, Clone)]
pub struct ProviderControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for ProviderControlHandle {
    // Closes the channel without an epitaph.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Closes the channel, sending `status` to the peer as an epitaph first.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

// No events are defined for Provider, so there are no send_* methods.
impl ProviderControlHandle {}
430
/// Marker type identifying the `fuchsia.memorypressure/Watcher` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct WatcherMarker;

impl fidl::endpoints::ProtocolMarker for WatcherMarker {
    type Proxy = WatcherProxy;
    type RequestStream = WatcherRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = WatcherSynchronousProxy;

    // Watcher is not discoverable (no DiscoverableProtocolMarker impl); the
    // anonymous name is only used for error reporting.
    const DEBUG_NAME: &'static str = "(anonymous) Watcher";
}
442
/// Client-side interface for `Watcher`, implemented by the async proxy.
pub trait WatcherProxyInterface: Send + Sync {
    /// Future resolving when the server acknowledges the level change.
    type OnLevelChangedResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    /// Two-way call notifying the watcher of the current pressure level.
    fn r#on_level_changed(&self, level: Level) -> Self::OnLevelChangedResponseFut;
}

/// Synchronous (blocking) client proxy for `fuchsia.memorypressure/Watcher`.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct WatcherSynchronousProxy {
    client: fidl::client::sync::Client,
}
452
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for WatcherSynchronousProxy {
    type Proxy = WatcherProxy;
    type Protocol = WatcherMarker;

    fn from_channel(channel: fidl::Channel) -> Self {
        // Delegate to the inherent constructor, which tags the client with
        // the protocol's debug name.
        WatcherSynchronousProxy::new(channel)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
470
#[cfg(target_os = "fuchsia")]
impl WatcherSynchronousProxy {
    /// Creates a synchronous proxy over `channel`, recording the protocol's
    /// `DEBUG_NAME` for use in error reporting.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<WatcherEvent, fidl::Error> {
        WatcherEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<(), fidl::Error> {
        // Two-way call: blocks until the (empty) acknowledgement arrives or
        // the deadline elapses.
        let _response =
            self.client.send_query::<WatcherOnLevelChangedRequest, fidl::encoding::EmptyPayload>(
                (level,),
                0x55d559533407fed9,
                fidl::encoding::DynamicFlags::empty(),
                ___deadline,
            )?;
        Ok(_response)
    }
}
538
#[cfg(target_os = "fuchsia")]
impl From<WatcherSynchronousProxy> for zx::Handle {
    /// Discards the proxy wrapper and yields the raw Zircon handle.
    fn from(proxy: WatcherSynchronousProxy) -> Self {
        let channel = proxy.into_channel();
        channel.into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for WatcherSynchronousProxy {
    /// Wraps a raw channel in a synchronous proxy.
    fn from(channel: fidl::Channel) -> Self {
        WatcherSynchronousProxy::new(channel)
    }
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for WatcherSynchronousProxy {
    type Protocol = WatcherMarker;

    /// Converts a typed client end into a synchronous proxy.
    fn from_client(client: fidl::endpoints::ClientEnd<WatcherMarker>) -> Self {
        WatcherSynchronousProxy::new(client.into_channel())
    }
}
561
562#[derive(Debug, Clone)]
563pub struct WatcherProxy {
564    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
565}
566
567impl fidl::endpoints::Proxy for WatcherProxy {
568    type Protocol = WatcherMarker;
569
570    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
571        Self::new(inner)
572    }
573
574    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
575        self.client.into_channel().map_err(|client| Self { client })
576    }
577
578    fn as_channel(&self) -> &::fidl::AsyncChannel {
579        self.client.as_channel()
580    }
581}
582
impl WatcherProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Watcher.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> WatcherEventStream {
        WatcherEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Forward to the trait impl, which performs the actual channel write.
        WatcherProxyInterface::r#on_level_changed(self, level)
    }
}
638
// Wire-level implementation of the client interface for the async proxy.
impl WatcherProxyInterface for WatcherProxy {
    type OnLevelChangedResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#on_level_changed(&self, mut level: Level) -> Self::OnLevelChangedResponseFut {
        // Decodes the (empty) response body of the two-way call.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::EmptyPayload,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x55d559533407fed9,
            >(_buf?)?;
            Ok(_response)
        }
        // Two-way call: send the request and return a future that resolves
        // when the acknowledgement arrives.
        self.client.send_query_and_decode::<WatcherOnLevelChangedRequest, ()>(
            (level,),
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
            _decode,
        )
    }
}
661
/// Stream of events sent by the server end of `Watcher`.
pub struct WatcherEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for WatcherEventStream {}

impl futures::stream::FusedStream for WatcherEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for WatcherEventStream {
    type Item = Result<WatcherEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Poll the underlying receiver; each raw buffer is decoded into a
        // typed event before being yielded.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(WatcherEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}
690
/// Events for `Watcher`. The protocol declares no events, so this enum is
/// empty and every received ordinal is an error.
#[derive(Debug)]
pub enum WatcherEvent {}

impl WatcherEvent {
    /// Decodes a message buffer as a [`WatcherEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<WatcherEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        // Events carry no transaction id.
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            // No event ordinals exist for this protocol.
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}
710
/// A Stream of incoming requests for fuchsia.memorypressure/Watcher.
pub struct WatcherRequestStream {
    // Shared server state; also referenced by every WatcherControlHandle.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for WatcherRequestStream {}

impl futures::stream::FusedStream for WatcherRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for WatcherRequestStream {
    type Protocol = WatcherMarker;
    type ControlHandle = WatcherControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    fn control_handle(&self) -> Self::ControlHandle {
        WatcherControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
751
impl futures::Stream for WatcherRequestStream {
    type Item = Result<WatcherRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A shutdown requested via a control handle ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled WatcherRequestStream after completion");
        }
        // Decode in thread-local scratch buffers to avoid per-message
        // allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure is the normal end-of-stream condition, not
                    // an error.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Watcher.OnLevelChanged (two-way: the responder carries
                    // the tx_id needed to reply).
                    0x55d559533407fed9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            WatcherOnLevelChangedRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<WatcherOnLevelChangedRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = WatcherControlHandle { inner: this.inner.clone() };
                        Ok(WatcherRequest::OnLevelChanged {
                            level: req.level,

                            responder: WatcherOnLevelChangedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
814
/// Watcher protocol
/// To be implemented by clients who wish to be notified on memory pressure level changes.
///
/// Each variant corresponds to one method of the protocol; requests are
/// produced by polling the server-side `WatcherRequestStream`.
#[derive(Debug)]
pub enum WatcherRequest {
    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    OnLevelChanged { level: Level, responder: WatcherOnLevelChangedResponder },
}
853
854impl WatcherRequest {
855    #[allow(irrefutable_let_patterns)]
856    pub fn into_on_level_changed(self) -> Option<(Level, WatcherOnLevelChangedResponder)> {
857        if let WatcherRequest::OnLevelChanged { level, responder } = self {
858            Some((level, responder))
859        } else {
860            None
861        }
862    }
863
864    /// Name of the method defined in FIDL
865    pub fn method_name(&self) -> &'static str {
866        match *self {
867            WatcherRequest::OnLevelChanged { .. } => "on_level_changed",
868        }
869    }
870}
871
/// Server-side control handle for a `Watcher` connection.
///
/// Cloneable; all clones share the same underlying channel state via the
/// reference-counted `ServeInner`.
#[derive(Debug, Clone)]
pub struct WatcherControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
876
// All methods delegate directly to the shared `ServeInner` / its channel.
impl fidl::endpoints::ControlHandle for WatcherControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raw zircon signaling is only available on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
902
// The `Watcher` protocol declares no events, so there are no inherent
// event-sending methods; the empty impl is emitted by the code generator
// for uniformity across protocols.
impl WatcherControlHandle {}
904
/// Responder for the two-way `Watcher.OnLevelChanged` method: sends the
/// empty acknowledgement reply for transaction `tx_id`.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct WatcherOnLevelChangedResponder {
    // ManuallyDrop lets `drop_without_shutdown` destroy the handle without
    // running this type's `Drop` impl (which would shut down the channel).
    control_handle: std::mem::ManuallyDrop<WatcherControlHandle>,
    // Transaction id copied from the request header; echoed in the reply.
    tx_id: u32,
}
911
/// Sets the channel to be shutdown (see [`WatcherControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for WatcherOnLevelChangedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
922
impl fidl::endpoints::Responder for WatcherOnLevelChangedResponder {
    type ControlHandle = WatcherControlHandle;

    fn control_handle(&self) -> &WatcherControlHandle {
        &self.control_handle
    }

    // Consumes the responder without triggering the Drop-time shutdown:
    // the inner handle is dropped manually, then `self` is forgotten so
    // `Drop::drop` never runs.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
937
impl WatcherOnLevelChangedResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self) -> Result<(), fidl::Error> {
        let _result = self.send_raw();
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Consume `self` without running Drop (the reply — or shutdown —
        // has already happened above).
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
        let _result = self.send_raw();
        self.drop_without_shutdown();
        _result
    }

    // Writes the empty acknowledgement reply for `OnLevelChanged`, echoing
    // the saved transaction id. The ordinal matches the one decoded for this
    // method in the request stream.
    fn send_raw(&self) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::EmptyPayload>(
            (),
            self.tx_id,
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
967
// FIDL wire-format encode/decode machinery for this library's resource types.
// Generated code: layout constants and trait impls must match the FIDL wire
// format exactly.
mod internal {
    use super::*;

    // Resource (handle-bearing) types encode by moving handles out of the
    // value, so encoding borrows the value mutably rather than by `&`.
    impl fidl::encoding::ResourceTypeMarker for ProviderRegisterWatcherRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for ProviderRegisterWatcherRequest {
        type Owned = Self;

        // Inline wire layout: 4 bytes, 4-byte aligned (the single
        // `watcher` endpoint handle).
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            4
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            4
        }
    }

    unsafe impl
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut ProviderRegisterWatcherRequest
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Delegate to tuple encoding. `take_or_borrow` moves the handle
            // out of `self.watcher` so it can be transferred on the wire.
            fidl::encoding::Encode::<ProviderRegisterWatcherRequest, fidl::encoding::DefaultFuchsiaResourceDialect>::encode(
                (
                    <fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow(&mut self.watcher),
                ),
                encoder, offset, _depth
            )
        }
    }
    // Tuple form: allows encoding from any per-field encoder, used by the
    // struct impl above.
    unsafe impl<
            T0: fidl::encoding::Encode<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
        >
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for (T0,)
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Zero out padding regions. There's no need to apply masks
            // because the unmasked parts will be overwritten by fields.
            // Write the fields.
            self.0.encode(encoder, offset + 0, depth)?;
            Ok(())
        }
    }

    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for ProviderRegisterWatcherRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self {
                watcher: fidl::new_empty!(
                    fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                ),
            }
        }

        #[inline]
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Verify that padding bytes are zero.
            // Decode the single field in place at its wire offset.
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                &mut self.watcher,
                decoder,
                offset + 0,
                _depth
            )?;
            Ok(())
        }
    }
}