// WARNING: This file is machine generated by fidlgen.

#![warn(clippy::all)]
#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]

use bitflags::bitflags;
use fidl::client::QueryResponseFut;
use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
use fidl::endpoints::{ControlHandle as _, Responder as _};
pub use fidl_fuchsia_memorypressure_common::*;
use futures::future::{self, MaybeDone, TryFutureExt};
use zx_status;

#[derive(Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ProviderRegisterWatcherRequest {
    pub watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
}

impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for ProviderRegisterWatcherRequest
{
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ProviderMarker;

impl fidl::endpoints::ProtocolMarker for ProviderMarker {
    type Proxy = ProviderProxy;
    type RequestStream = ProviderRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = ProviderSynchronousProxy;

    const DEBUG_NAME: &'static str = "fuchsia.memorypressure.Provider";
}
impl fidl::endpoints::DiscoverableProtocolMarker for ProviderMarker {}

pub trait ProviderProxyInterface: Send + Sync {
    fn r#register_watcher(
        &self,
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error>;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct ProviderSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for ProviderSynchronousProxy {
    type Proxy = ProviderProxy;
    type Protocol = ProviderMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}

#[cfg(target_os = "fuchsia")]
impl ProviderSynchronousProxy {
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<ProviderEvent, fidl::Error> {
        ProviderEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of child jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
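
// Illustrative sketch (not part of the generated bindings): registering a
// watcher over the synchronous proxy. `fidl::endpoints::create_endpoints` is
// a real helper, though its exact return shape has varied across SDK
// revisions; serving `server_end` is left to the caller.
//
//     #[cfg(target_os = "fuchsia")]
//     fn register_sync(provider: &ProviderSynchronousProxy) -> Result<(), fidl::Error> {
//         // Create a Watcher channel pair: the client end goes to the provider,
//         // the server end must be served by this process's Watcher implementation.
//         let (client_end, server_end) = fidl::endpoints::create_endpoints::<WatcherMarker>();
//         provider.register_watcher(client_end)?;
//         // ... hand `server_end` off to a WatcherRequestStream server ...
//         let _ = server_end;
//         Ok(())
//     }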

#[cfg(target_os = "fuchsia")]
impl From<ProviderSynchronousProxy> for zx::Handle {
    fn from(value: ProviderSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for ProviderSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

#[derive(Debug, Clone)]
pub struct ProviderProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for ProviderProxy {
    type Protocol = ProviderMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}

impl ProviderProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Provider.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> ProviderEventStream {
        ProviderEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of child jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        ProviderProxyInterface::r#register_watcher(self, watcher)
    }
}
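
// Illustrative sketch (not generated): an async client creating a Watcher
// channel and registering it with an already-connected `ProviderProxy`.
// `fidl::endpoints::create_request_stream` is a real helper, but in some SDK
// revisions it returns a `Result`; adjust to your environment.
//
//     async fn register_async(
//         provider: &ProviderProxy,
//     ) -> Result<WatcherRequestStream, fidl::Error> {
//         // The client end travels to the provider; the request stream stays
//         // here so this component can answer OnLevelChanged messages.
//         let (client_end, request_stream) =
//             fidl::endpoints::create_request_stream::<WatcherMarker>();
//         provider.register_watcher(client_end)?;
//         Ok(request_stream)
//     }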

impl ProviderProxyInterface for ProviderProxy {
    fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}

pub struct ProviderEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for ProviderEventStream {}

impl futures::stream::FusedStream for ProviderEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for ProviderEventStream {
    type Item = Result<ProviderEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(ProviderEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}

#[derive(Debug)]
pub enum ProviderEvent {}

impl ProviderEvent {
    /// Decodes a message buffer as a [`ProviderEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<ProviderEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}

/// A Stream of incoming requests for fuchsia.memorypressure/Provider.
pub struct ProviderRequestStream {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for ProviderRequestStream {}

impl futures::stream::FusedStream for ProviderRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for ProviderRequestStream {
    type Protocol = ProviderMarker;
    type ControlHandle = ProviderControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    fn control_handle(&self) -> Self::ControlHandle {
        ProviderControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}

impl futures::Stream for ProviderRequestStream {
    type Item = Result<ProviderRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled ProviderRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    0x91e65af25aae4a9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            ProviderRegisterWatcherRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<ProviderRegisterWatcherRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = ProviderControlHandle { inner: this.inner.clone() };
                        Ok(ProviderRequest::RegisterWatcher {
                            watcher: req.watcher,

                            control_handle,
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}

/// Registration protocol
#[derive(Debug)]
pub enum ProviderRequest {
    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of child jobs in its tree.
    RegisterWatcher {
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
        control_handle: ProviderControlHandle,
    },
}

impl ProviderRequest {
    #[allow(irrefutable_let_patterns)]
    pub fn into_register_watcher(
        self,
    ) -> Option<(fidl::endpoints::ClientEnd<WatcherMarker>, ProviderControlHandle)> {
        if let ProviderRequest::RegisterWatcher { watcher, control_handle } = self {
            Some((watcher, control_handle))
        } else {
            None
        }
    }

    /// Name of the method defined in FIDL
    pub fn method_name(&self) -> &'static str {
        match *self {
            ProviderRequest::RegisterWatcher { .. } => "register_watcher",
        }
    }
}
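
// Illustrative sketch (not generated): serving a ProviderRequestStream. The
// `watchers` bookkeeping and `TryStreamExt` usage are assumptions, and
// `into_proxy`'s return shape has varied across SDK revisions. Per the
// protocol contract, a real provider would also send the current level to
// each newly registered watcher right away.
//
//     use futures::TryStreamExt;
//
//     async fn serve_provider(mut stream: ProviderRequestStream) -> Result<(), fidl::Error> {
//         let mut watchers: Vec<WatcherProxy> = Vec::new();
//         while let Some(request) = stream.try_next().await? {
//             match request {
//                 ProviderRequest::RegisterWatcher { watcher, control_handle: _ } => {
//                     // Convert the client end into an async proxy; the current
//                     // pressure level should be sent to it immediately.
//                     let proxy = watcher.into_proxy();
//                     watchers.push(proxy);
//                 }
//             }
//         }
//         Ok(())
//     }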

#[derive(Debug, Clone)]
pub struct ProviderControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for ProviderControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

impl ProviderControlHandle {}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct WatcherMarker;

impl fidl::endpoints::ProtocolMarker for WatcherMarker {
    type Proxy = WatcherProxy;
    type RequestStream = WatcherRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = WatcherSynchronousProxy;

    const DEBUG_NAME: &'static str = "(anonymous) Watcher";
}

pub trait WatcherProxyInterface: Send + Sync {
    type OnLevelChangedResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#on_level_changed(&self, level: Level) -> Self::OnLevelChangedResponseFut;
}
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct WatcherSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for WatcherSynchronousProxy {
    type Proxy = WatcherProxy;
    type Protocol = WatcherMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}

#[cfg(target_os = "fuchsia")]
impl WatcherSynchronousProxy {
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<WatcherEvent, fidl::Error> {
        WatcherEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<(), fidl::Error> {
        let _response =
            self.client.send_query::<WatcherOnLevelChangedRequest, fidl::encoding::EmptyPayload>(
                (level,),
                0x55d559533407fed9,
                fidl::encoding::DynamicFlags::empty(),
                ___deadline,
            )?;
        Ok(_response)
    }
}
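
// Illustrative sketch (not generated): notifying a watcher synchronously. The
// call blocks until the watcher acknowledges or the deadline passes;
// `zx::MonotonicInstant::INFINITE` is assumed here as the no-deadline value.
//
//     #[cfg(target_os = "fuchsia")]
//     fn notify_blocking(
//         watcher: &WatcherSynchronousProxy,
//         level: Level,
//     ) -> Result<(), fidl::Error> {
//         watcher.on_level_changed(level, zx::MonotonicInstant::INFINITE)
//     }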

#[cfg(target_os = "fuchsia")]
impl From<WatcherSynchronousProxy> for zx::Handle {
    fn from(value: WatcherSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for WatcherSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

#[derive(Debug, Clone)]
pub struct WatcherProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for WatcherProxy {
    type Protocol = WatcherMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}

impl WatcherProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Watcher.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> WatcherEventStream {
        WatcherEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        WatcherProxyInterface::r#on_level_changed(self, level)
    }
}
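
// Illustrative sketch (not generated): a provider notifying a registered
// watcher and awaiting the acknowledgement before sending the next level,
// matching guideline 1 in the docs above.
//
//     async fn notify(watcher: &WatcherProxy, level: Level) -> Result<(), fidl::Error> {
//         // The future resolves once the watcher replies, i.e. once it has
//         // acknowledged the notification and initiated its response actions.
//         watcher.on_level_changed(level).await
//     }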

impl WatcherProxyInterface for WatcherProxy {
    type OnLevelChangedResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#on_level_changed(&self, mut level: Level) -> Self::OnLevelChangedResponseFut {
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::EmptyPayload,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x55d559533407fed9,
            >(_buf?)?;
            Ok(_response)
        }
        self.client.send_query_and_decode::<WatcherOnLevelChangedRequest, ()>(
            (level,),
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
            _decode,
        )
    }
}

pub struct WatcherEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl std::marker::Unpin for WatcherEventStream {}

impl futures::stream::FusedStream for WatcherEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}

impl futures::Stream for WatcherEventStream {
    type Item = Result<WatcherEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            Some(buf) => std::task::Poll::Ready(Some(WatcherEvent::decode(buf))),
            None => std::task::Poll::Ready(None),
        }
    }
}

#[derive(Debug)]
pub enum WatcherEvent {}

impl WatcherEvent {
    /// Decodes a message buffer as a [`WatcherEvent`].
    fn decode(
        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
    ) -> Result<WatcherEvent, fidl::Error> {
        let (bytes, _handles) = buf.split_mut();
        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
        debug_assert_eq!(tx_header.tx_id, 0);
        match tx_header.ordinal {
            _ => Err(fidl::Error::UnknownOrdinal {
                ordinal: tx_header.ordinal,
                protocol_name: <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
            }),
        }
    }
}

/// A Stream of incoming requests for fuchsia.memorypressure/Watcher.
pub struct WatcherRequestStream {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    is_terminated: bool,
}

impl std::marker::Unpin for WatcherRequestStream {}

impl futures::stream::FusedStream for WatcherRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}

impl fidl::endpoints::RequestStream for WatcherRequestStream {
    type Protocol = WatcherMarker;
    type ControlHandle = WatcherControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    fn control_handle(&self) -> Self::ControlHandle {
        WatcherControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}

impl futures::Stream for WatcherRequestStream {
    type Item = Result<WatcherRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled WatcherRequestStream after completion");
        }
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    0x55d559533407fed9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            WatcherOnLevelChangedRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<WatcherOnLevelChangedRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = WatcherControlHandle { inner: this.inner.clone() };
                        Ok(WatcherRequest::OnLevelChanged {
                            level: req.level,

                            responder: WatcherOnLevelChangedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}

/// Watcher protocol
/// To be implemented by clients who wish to be notified on memory pressure level changes.
#[derive(Debug)]
pub enum WatcherRequest {
    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    OnLevelChanged { level: Level, responder: WatcherOnLevelChangedResponder },
}

impl WatcherRequest {
    #[allow(irrefutable_let_patterns)]
    pub fn into_on_level_changed(self) -> Option<(Level, WatcherOnLevelChangedResponder)> {
        if let WatcherRequest::OnLevelChanged { level, responder } = self {
            Some((level, responder))
        } else {
            None
        }
    }

    /// Name of the method defined in FIDL
    pub fn method_name(&self) -> &'static str {
        match *self {
            WatcherRequest::OnLevelChanged { .. } => "on_level_changed",
        }
    }
}
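
// Illustrative sketch (not generated): a minimal watcher server. It records
// the latest level locally (guideline 2: notifications are edge-triggered)
// and acknowledges immediately so the provider can deliver the next
// transition. `TryStreamExt` and the reclamation hook are assumptions.
//
//     use futures::TryStreamExt;
//
//     async fn serve_watcher(mut stream: WatcherRequestStream) -> Result<(), fidl::Error> {
//         let mut current_level: Option<Level> = None;
//         while let Some(request) = stream.try_next().await? {
//             match request {
//                 WatcherRequest::OnLevelChanged { level, responder } => {
//                     current_level = Some(level);
//                     // Ack first; memory reclamation may continue asynchronously.
//                     responder.send()?;
//                     // ... start freeing caches etc. based on `level` ...
//                 }
//             }
//         }
//         Ok(())
//     }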

#[derive(Debug, Clone)]
pub struct WatcherControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for WatcherControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

impl WatcherControlHandle {}

#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct WatcherOnLevelChangedResponder {
    control_handle: std::mem::ManuallyDrop<WatcherControlHandle>,
    tx_id: u32,
}
/// Sets the channel to shut down (see [`WatcherControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for WatcherOnLevelChangedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}

impl fidl::endpoints::Responder for WatcherOnLevelChangedResponder {
    type ControlHandle = WatcherControlHandle;

    fn control_handle(&self) -> &WatcherControlHandle {
        &self.control_handle
    }

    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}

impl WatcherOnLevelChangedResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shut down if an error occurs.
    pub fn send(self) -> Result<(), fidl::Error> {
        let _result = self.send_raw();
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shut down the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
        let _result = self.send_raw();
        self.drop_without_shutdown();
        _result
    }

    fn send_raw(&self) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<fidl::encoding::EmptyPayload>(
            (),
            self.tx_id,
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}

mod internal {
    use super::*;

    impl fidl::encoding::ResourceTypeMarker for ProviderRegisterWatcherRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for ProviderRegisterWatcherRequest {
        type Owned = Self;

        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            4
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            4
        }
    }

    unsafe impl
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut ProviderRegisterWatcherRequest
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Delegate to tuple encoding.
            fidl::encoding::Encode::<ProviderRegisterWatcherRequest, fidl::encoding::DefaultFuchsiaResourceDialect>::encode(
                (
                    <fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow(&mut self.watcher),
                ),
                encoder, offset, _depth
            )
        }
    }
    unsafe impl<
            T0: fidl::encoding::Encode<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
        >
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for (T0,)
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Zero out padding regions. There's no need to apply masks
            // because the unmasked parts will be overwritten by fields.
            // Write the fields.
            self.0.encode(encoder, offset + 0, depth)?;
            Ok(())
        }
    }

    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for ProviderRegisterWatcherRequest
    {
        #[inline(always)]
        fn new_empty() -> Self {
            Self {
                watcher: fidl::new_empty!(
                    fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                ),
            }
        }

        #[inline]
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Verify that padding bytes are zero.
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                &mut self.watcher,
                decoder,
                offset + 0,
                _depth
            )?;
            Ok(())
        }
    }
}