// fidl_fuchsia_memorypressure/fidl_fuchsia_memorypressure.rs
1// WARNING: This file is machine generated by fidlgen.
2
3#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
5
6use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_memorypressure__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
13
/// Request payload for `Provider.RegisterWatcher`: carries the client end of
/// the `Watcher` channel being registered.
#[derive(Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ProviderRegisterWatcherRequest {
    pub watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
}

// Marker impl: this resource type can be encoded/decoded standalone in the
// default Fuchsia resource dialect.
impl fidl::Standalone<fidl::encoding::DefaultFuchsiaResourceDialect>
    for ProviderRegisterWatcherRequest
{
}
23
/// Protocol marker type for `fuchsia.memorypressure.Provider`.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ProviderMarker;

impl fidl::endpoints::ProtocolMarker for ProviderMarker {
    type Proxy = ProviderProxy;
    type RequestStream = ProviderRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = ProviderSynchronousProxy;

    // Name used in error messages and for discovery.
    const DEBUG_NAME: &'static str = "fuchsia.memorypressure.Provider";
}
// `Provider` can be discovered/routed by its DEBUG_NAME.
impl fidl::endpoints::DiscoverableProtocolMarker for ProviderMarker {}
36
/// Client-side interface for `Provider`, implemented by both the async and
/// synchronous proxies.
pub trait ProviderProxyInterface: Send + Sync {
    // One-way method: sends the watcher client end; no reply is awaited.
    fn r#register_watcher(
        &self,
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error>;
}
/// Blocking (synchronous) client for `Provider`; only available on Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct ProviderSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for ProviderSynchronousProxy {
    type Proxy = ProviderProxy;
    type Protocol = ProviderMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Recovers the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
66
#[cfg(target_os = "fuchsia")]
impl ProviderSynchronousProxy {
    /// Wraps the given channel in a synchronous FIDL client.
    pub fn new(channel: fidl::Channel) -> Self {
        Self { client: fidl::client::sync::Client::new(channel) }
    }

    /// Recovers the underlying channel, consuming the proxy.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<ProviderEvent, fidl::Error> {
        ProviderEvent::decode(self.client.wait_for_event::<ProviderMarker>(deadline)?)
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // One-way call: encode the request with the method's wire ordinal;
        // no response is expected.
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
110
#[cfg(target_os = "fuchsia")]
impl From<ProviderSynchronousProxy> for zx::NullableHandle {
    // Converts the proxy into its raw handle by way of the channel.
    fn from(value: ProviderSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for ProviderSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for ProviderSynchronousProxy {
    type Protocol = ProviderMarker;

    // Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<ProviderMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
133
/// Asynchronous client for `fuchsia.memorypressure/Provider`.
#[derive(Debug, Clone)]
pub struct ProviderProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for ProviderProxy {
    type Protocol = ProviderMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Attempts to recover the channel; on failure the proxy is returned
    // intact to the caller.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
154
impl ProviderProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Provider.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> ProviderEventStream {
        ProviderEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    pub fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // Delegates to the trait impl, which performs the one-way send.
        ProviderProxyInterface::r#register_watcher(self, watcher)
    }
}
191
impl ProviderProxyInterface for ProviderProxy {
    fn r#register_watcher(
        &self,
        mut watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
    ) -> Result<(), fidl::Error> {
        // One-way call: fire-and-forget send with the method's wire ordinal.
        self.client.send::<ProviderRegisterWatcherRequest>(
            (watcher,),
            0x91e65af25aae4a9,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
204
/// Stream of events arriving on a `Provider` client channel.
pub struct ProviderEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

// The stream holds no self-referential data, so it is trivially Unpin.
impl std::marker::Unpin for ProviderEventStream {}

impl futures::stream::FusedStream for ProviderEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
216
217impl futures::Stream for ProviderEventStream {
218    type Item = Result<ProviderEvent, fidl::Error>;
219
220    fn poll_next(
221        mut self: std::pin::Pin<&mut Self>,
222        cx: &mut std::task::Context<'_>,
223    ) -> std::task::Poll<Option<Self::Item>> {
224        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
225            &mut self.event_receiver,
226            cx
227        )?) {
228            Some(buf) => std::task::Poll::Ready(Some(ProviderEvent::decode(buf))),
229            None => std::task::Poll::Ready(None),
230        }
231    }
232}
233
/// Events emitted by `Provider`. The protocol declares none, so this enum is
/// uninhabited.
#[derive(Debug)]
pub enum ProviderEvent {}
236
237impl ProviderEvent {
238    /// Decodes a message buffer as a [`ProviderEvent`].
239    fn decode(
240        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
241    ) -> Result<ProviderEvent, fidl::Error> {
242        let (bytes, _handles) = buf.split_mut();
243        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
244        debug_assert_eq!(tx_header.tx_id, 0);
245        match tx_header.ordinal {
246            _ => Err(fidl::Error::UnknownOrdinal {
247                ordinal: tx_header.ordinal,
248                protocol_name: <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
249            }),
250        }
251    }
252}
253
/// A Stream of incoming requests for fuchsia.memorypressure/Provider.
pub struct ProviderRequestStream {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shutdown is observed; polling after
    // this point is a caller bug.
    is_terminated: bool,
}

// No self-referential data; trivially Unpin.
impl std::marker::Unpin for ProviderRequestStream {}

impl futures::stream::FusedStream for ProviderRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
267
impl fidl::endpoints::RequestStream for ProviderRequestStream {
    type Protocol = ProviderMarker;
    type ControlHandle = ProviderControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Creates a control handle sharing this stream's serve state.
    fn control_handle(&self) -> Self::ControlHandle {
        ProviderControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
294
impl futures::Stream for ProviderRequestStream {
    type Item = Result<ProviderRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a requested shutdown before attempting another read.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled ProviderRequestStream after completion");
        }
        // Read and decode using thread-local scratch buffers.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream without an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Provider.RegisterWatcher (one-way).
                    0x91e65af25aae4a9 => {
                        header.validate_request_tx_id(fidl::MethodType::OneWay)?;
                        let mut req = fidl::new_empty!(
                            ProviderRegisterWatcherRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<ProviderRegisterWatcherRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = ProviderControlHandle { inner: this.inner.clone() };
                        Ok(ProviderRequest::RegisterWatcher {
                            watcher: req.watcher,

                            control_handle,
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <ProviderMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
354
/// Registration protocol
#[derive(Debug)]
pub enum ProviderRequest {
    /// Used to register for memory pressure level changes.
    /// `watcher`: memory pressure `Watcher` channel that the `Provider` will use to send
    /// level change messages to the client.
    ///
    /// The current memory pressure level is immediately sent to the watcher
    /// when this method is called.
    ///
    /// It is recommended that the root job in a component tree register for changes,
    /// rather than having individual jobs further down the tree register individually.
    /// A low client count will help minimize system churn due to a large number of
    /// memory pressure messages in transit at the same time.
    /// Also, the more context a job has, the better equipped it will be to react to
    /// memory pressure by controlling the behavior of children jobs in its tree.
    RegisterWatcher {
        watcher: fidl::endpoints::ClientEnd<WatcherMarker>,
        // Handle for managing the serving connection (shutdown, epitaphs).
        control_handle: ProviderControlHandle,
    },
}
376
377impl ProviderRequest {
378    #[allow(irrefutable_let_patterns)]
379    pub fn into_register_watcher(
380        self,
381    ) -> Option<(fidl::endpoints::ClientEnd<WatcherMarker>, ProviderControlHandle)> {
382        if let ProviderRequest::RegisterWatcher { watcher, control_handle } = self {
383            Some((watcher, control_handle))
384        } else {
385            None
386        }
387    }
388
389    /// Name of the method defined in FIDL
390    pub fn method_name(&self) -> &'static str {
391        match *self {
392            ProviderRequest::RegisterWatcher { .. } => "register_watcher",
393        }
394    }
395}
396
/// Server-side handle for the `Provider` connection, used to manage the
/// channel independent of any single request.
#[derive(Debug, Clone)]
pub struct ProviderControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}

impl fidl::endpoints::ControlHandle for ProviderControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Closes the channel after sending the given status as an epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}

// Empty: `Provider` declares no events, so there are no event senders.
impl ProviderControlHandle {}
430
/// Protocol marker type for the `Watcher` protocol.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct WatcherMarker;

impl fidl::endpoints::ProtocolMarker for WatcherMarker {
    type Proxy = WatcherProxy;
    type RequestStream = WatcherRequestStream;
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = WatcherSynchronousProxy;

    // Debug name only; `Watcher` is reached via `Provider.RegisterWatcher`
    // rather than by discovery, hence "(anonymous)".
    const DEBUG_NAME: &'static str = "(anonymous) Watcher";
}
442
/// Client-side interface for `Watcher`.
pub trait WatcherProxyInterface: Send + Sync {
    // Two-way method: the future resolves once the server acknowledges.
    type OnLevelChangedResponseFut: std::future::Future<Output = Result<(), fidl::Error>> + Send;
    fn r#on_level_changed(&self, level: Level) -> Self::OnLevelChangedResponseFut;
}
/// Blocking (synchronous) client for `Watcher`; only available on Fuchsia.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct WatcherSynchronousProxy {
    client: fidl::client::sync::Client,
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for WatcherSynchronousProxy {
    type Proxy = WatcherProxy;
    type Protocol = WatcherMarker;

    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    // Recovers the underlying channel, consuming the proxy.
    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
470
#[cfg(target_os = "fuchsia")]
impl WatcherSynchronousProxy {
    /// Wraps the given channel in a synchronous FIDL client.
    pub fn new(channel: fidl::Channel) -> Self {
        Self { client: fidl::client::sync::Client::new(channel) }
    }

    /// Recovers the underlying channel, consuming the proxy.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<WatcherEvent, fidl::Error> {
        WatcherEvent::decode(self.client.wait_for_event::<WatcherMarker>(deadline)?)
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
        ___deadline: zx::MonotonicInstant,
    ) -> Result<(), fidl::Error> {
        // Two-way call: blocks until the empty acknowledgement arrives or the
        // deadline elapses.
        let _response = self.client.send_query::<
            WatcherOnLevelChangedRequest,
            fidl::encoding::EmptyPayload,
            WatcherMarker,
        >(
            (level,),
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
            ___deadline,
        )?;
        Ok(_response)
    }
}
540
#[cfg(target_os = "fuchsia")]
impl From<WatcherSynchronousProxy> for zx::NullableHandle {
    // Converts the proxy into its raw handle by way of the channel.
    fn from(value: WatcherSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}

#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for WatcherSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}

#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for WatcherSynchronousProxy {
    type Protocol = WatcherMarker;

    // Builds a synchronous proxy from a typed client endpoint.
    fn from_client(value: fidl::endpoints::ClientEnd<WatcherMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
563
/// Asynchronous client for `fuchsia.memorypressure/Watcher`.
#[derive(Debug, Clone)]
pub struct WatcherProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

impl fidl::endpoints::Proxy for WatcherProxy {
    type Protocol = WatcherMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Attempts to recover the channel; on failure the proxy is returned
    // intact to the caller.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
584
impl WatcherProxy {
    /// Create a new Proxy for fuchsia.memorypressure/Watcher.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> WatcherEventStream {
        WatcherEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    pub fn r#on_level_changed(
        &self,
        mut level: Level,
    ) -> fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect> {
        // Delegates to the trait impl, which performs the two-way call.
        WatcherProxyInterface::r#on_level_changed(self, level)
    }
}
640
impl WatcherProxyInterface for WatcherProxy {
    type OnLevelChangedResponseFut =
        fidl::client::QueryResponseFut<(), fidl::encoding::DefaultFuchsiaResourceDialect>;
    fn r#on_level_changed(&self, mut level: Level) -> Self::OnLevelChangedResponseFut {
        // Decodes the (empty) acknowledgement payload of the reply.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<(), fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                fidl::encoding::EmptyPayload,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x55d559533407fed9,
            >(_buf?)?;
            Ok(_response)
        }
        // Two-way call: send the request and decode the reply asynchronously.
        self.client.send_query_and_decode::<WatcherOnLevelChangedRequest, ()>(
            (level,),
            0x55d559533407fed9,
            fidl::encoding::DynamicFlags::empty(),
            _decode,
        )
    }
}
663
/// Stream of events arriving on a `Watcher` client channel.
pub struct WatcherEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}

// The stream holds no self-referential data, so it is trivially Unpin.
impl std::marker::Unpin for WatcherEventStream {}

impl futures::stream::FusedStream for WatcherEventStream {
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
675
676impl futures::Stream for WatcherEventStream {
677    type Item = Result<WatcherEvent, fidl::Error>;
678
679    fn poll_next(
680        mut self: std::pin::Pin<&mut Self>,
681        cx: &mut std::task::Context<'_>,
682    ) -> std::task::Poll<Option<Self::Item>> {
683        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
684            &mut self.event_receiver,
685            cx
686        )?) {
687            Some(buf) => std::task::Poll::Ready(Some(WatcherEvent::decode(buf))),
688            None => std::task::Poll::Ready(None),
689        }
690    }
691}
692
/// Events emitted by `Watcher`. The protocol declares none, so this enum is
/// uninhabited.
#[derive(Debug)]
pub enum WatcherEvent {}
695
696impl WatcherEvent {
697    /// Decodes a message buffer as a [`WatcherEvent`].
698    fn decode(
699        mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
700    ) -> Result<WatcherEvent, fidl::Error> {
701        let (bytes, _handles) = buf.split_mut();
702        let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
703        debug_assert_eq!(tx_header.tx_id, 0);
704        match tx_header.ordinal {
705            _ => Err(fidl::Error::UnknownOrdinal {
706                ordinal: tx_header.ordinal,
707                protocol_name: <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
708            }),
709        }
710    }
711}
712
/// A Stream of incoming requests for fuchsia.memorypressure/Watcher.
pub struct WatcherRequestStream {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or shutdown is observed; polling after
    // this point is a caller bug.
    is_terminated: bool,
}

// No self-referential data; trivially Unpin.
impl std::marker::Unpin for WatcherRequestStream {}

impl futures::stream::FusedStream for WatcherRequestStream {
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
726
impl fidl::endpoints::RequestStream for WatcherRequestStream {
    type Protocol = WatcherMarker;
    type ControlHandle = WatcherControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Creates a control handle sharing this stream's serve state.
    fn control_handle(&self) -> Self::ControlHandle {
        WatcherControlHandle { inner: self.inner.clone() }
    }

    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
753
impl futures::Stream for WatcherRequestStream {
    type Item = Result<WatcherRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Honor a requested shutdown before attempting another read.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled WatcherRequestStream after completion");
        }
        // Read and decode using thread-local scratch buffers.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure terminates the stream without an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))));
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Watcher.OnLevelChanged (two-way): the responder carries
                    // the tx_id needed to route the acknowledgement back.
                    0x55d559533407fed9 => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            WatcherOnLevelChangedRequest,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<WatcherOnLevelChangedRequest>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle = WatcherControlHandle { inner: this.inner.clone() };
                        Ok(WatcherRequest::OnLevelChanged {
                            level: req.level,

                            responder: WatcherOnLevelChangedResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <WatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
816
/// Watcher protocol
/// To be implemented by clients who wish to be notified on memory pressure level changes.
///
/// One variant per method of the `Watcher` FIDL protocol; values are produced
/// by the request stream when an incoming message is decoded.
#[derive(Debug)]
pub enum WatcherRequest {
    /// Sent to the registered client when the memory pressure level changes.
    /// `level`: indicates the current memory pressure level.
    ///
    /// Will also be invoked on initial connection via `RegisterWatcher`, so that a newly
    /// registered client can discover the current memory pressure level.
    ///
    /// The watcher must immediately reply with a message to acknowledge that it has
    /// received the level change notification, and has initiated required actions as a
    /// result. It may then continue to reclaim memory asynchronously after sending
    /// the acknowledgement.
    ///
    /// Some helpful guidelines for clients:
    /// 1. The watcher will be notified of new pressure level changes only after a reply
    /// corresponding to the previous message has been received by the provider.
    /// If multiple level transitions occur during that time, the watcher will be
    /// notified of the latest pressure level.
    ///
    /// 2. The level changes are edge-triggered, and clients are expected to maintain
    /// local state to track the current pressure level, if required. For example,
    /// a job might be notified of a CRITICAL level and drop all its caches as a result.
    /// Some time after this, it might want to trigger an activity that causes a
    /// fair amount of memory to be allocated. At this point, the job is expected to
    /// remember that the last pressure level it saw was CRITICAL, and refrain from
    /// triggering the memory-intensive activity.
    ///
    /// 3. As a performance optimization, the provider may decide to skip sending
    /// messages for some pressure level changes. For example, when oscillating across
    /// the NORMAL / WARNING boundary, it might not be worth notifying clients of every
    /// single transition. The provider might rate-limit messages in this case.
    /// On a similar note, the provider may decide to send repeated messages at the
    /// same pressure level, particularly CRITICAL, to indicate that further action
    /// needs to be taken.
    OnLevelChanged { level: Level, responder: WatcherOnLevelChangedResponder },
}
855
856impl WatcherRequest {
857    #[allow(irrefutable_let_patterns)]
858    pub fn into_on_level_changed(self) -> Option<(Level, WatcherOnLevelChangedResponder)> {
859        if let WatcherRequest::OnLevelChanged { level, responder } = self {
860            Some((level, responder))
861        } else {
862            None
863        }
864    }
865
866    /// Name of the method defined in FIDL
867    pub fn method_name(&self) -> &'static str {
868        match *self {
869            WatcherRequest::OnLevelChanged { .. } => "on_level_changed",
870        }
871    }
872}
873
/// Server-side control handle for a `Watcher` connection. Cloneable; every
/// responder built from the same request stream shares the same `ServeInner`.
#[derive(Debug, Clone)]
pub struct WatcherControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
878
// Standard control-handle operations; each call delegates to the shared
// `fidl::ServeInner` state for this connection.
impl fidl::endpoints::ControlHandle for WatcherControlHandle {
    // Stop serving requests and close the channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }

    // Stop serving, sending `status` as an epitaph before closing.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raise/clear user signals on the peer end of the channel.
    // Only available on Fuchsia targets, where zx signals exist.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
905
906impl WatcherControlHandle {}
907
/// Responder for the `Watcher.OnLevelChanged` method: sends the empty
/// acknowledgement reply for a received level-change notification.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct WatcherOnLevelChangedResponder {
    // Wrapped in ManuallyDrop so Drop/`drop_without_shutdown` can control
    // exactly when the handle is destroyed (see the Drop impl below).
    control_handle: std::mem::ManuallyDrop<WatcherControlHandle>,
    // Transaction id of the request this responder answers.
    tx_id: u32,
}
914
/// Set the channel to be shutdown (see [`WatcherControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for WatcherOnLevelChangedResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
925
impl fidl::endpoints::Responder for WatcherOnLevelChangedResponder {
    type ControlHandle = WatcherControlHandle;

    // Borrow the control handle without consuming the responder.
    fn control_handle(&self) -> &WatcherControlHandle {
        &self.control_handle
    }

    // Consume the responder without triggering the channel shutdown that the
    // Drop impl would otherwise perform.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
940
941impl WatcherOnLevelChangedResponder {
942    /// Sends a response to the FIDL transaction.
943    ///
944    /// Sets the channel to shutdown if an error occurs.
945    pub fn send(self) -> Result<(), fidl::Error> {
946        let _result = self.send_raw();
947        if _result.is_err() {
948            self.control_handle.shutdown();
949        }
950        self.drop_without_shutdown();
951        _result
952    }
953
954    /// Similar to "send" but does not shutdown the channel if an error occurs.
955    pub fn send_no_shutdown_on_err(self) -> Result<(), fidl::Error> {
956        let _result = self.send_raw();
957        self.drop_without_shutdown();
958        _result
959    }
960
961    fn send_raw(&self) -> Result<(), fidl::Error> {
962        self.control_handle.inner.send::<fidl::encoding::EmptyPayload>(
963            (),
964            self.tx_id,
965            0x55d559533407fed9,
966            fidl::encoding::DynamicFlags::empty(),
967        )
968    }
969}
970
// Encoding/decoding machinery for this library's resource types. Kept in a
// private module so the trait impls don't clutter the public namespace.
mod internal {
    use super::*;

    // The request carries a handle (a client end), so it must be encoded from
    // a mutable borrow that lets the encoder take the handle out.
    impl fidl::encoding::ResourceTypeMarker for ProviderRegisterWatcherRequest {
        type Borrowed<'a> = &'a mut Self;
        fn take_or_borrow<'a>(
            value: &'a mut <Self as fidl::encoding::TypeMarker>::Owned,
        ) -> Self::Borrowed<'a> {
            value
        }
    }

    unsafe impl fidl::encoding::TypeMarker for ProviderRegisterWatcherRequest {
        type Owned = Self;

        // The wire form of this struct is a single 4-byte handle slot.
        #[inline(always)]
        fn inline_align(_context: fidl::encoding::Context) -> usize {
            4
        }

        #[inline(always)]
        fn inline_size(_context: fidl::encoding::Context) -> usize {
            4
        }
    }

    unsafe impl
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for &mut ProviderRegisterWatcherRequest
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Delegate to tuple encoding. `take_or_borrow` moves the `watcher`
            // handle out of `self` so the encoder can transfer it.
            fidl::encoding::Encode::<ProviderRegisterWatcherRequest, fidl::encoding::DefaultFuchsiaResourceDialect>::encode(
                (
                    <fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>> as fidl::encoding::ResourceTypeMarker>::take_or_borrow(&mut self.watcher),
                ),
                encoder, offset, _depth
            )
        }
    }
    // Tuple form: encodes each field at its fixed offset within the struct.
    unsafe impl<
        T0: fidl::encoding::Encode<
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
    >
        fidl::encoding::Encode<
            ProviderRegisterWatcherRequest,
            fidl::encoding::DefaultFuchsiaResourceDialect,
        > for (T0,)
    {
        #[inline]
        unsafe fn encode(
            self,
            encoder: &mut fidl::encoding::Encoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            encoder.debug_check_bounds::<ProviderRegisterWatcherRequest>(offset);
            // Zero out padding regions. There's no need to apply masks
            // because the unmasked parts will be overwritten by fields.
            // Write the fields.
            self.0.encode(encoder, offset + 0, depth)?;
            Ok(())
        }
    }

    impl fidl::encoding::Decode<Self, fidl::encoding::DefaultFuchsiaResourceDialect>
        for ProviderRegisterWatcherRequest
    {
        // Placeholder value used as the decode target before the real handle
        // is read from the message.
        #[inline(always)]
        fn new_empty() -> Self {
            Self {
                watcher: fidl::new_empty!(
                    fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                    fidl::encoding::DefaultFuchsiaResourceDialect
                ),
            }
        }

        #[inline]
        unsafe fn decode(
            &mut self,
            decoder: &mut fidl::encoding::Decoder<
                '_,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
            offset: usize,
            _depth: fidl::encoding::Depth,
        ) -> fidl::Result<()> {
            decoder.debug_check_bounds::<Self>(offset);
            // Verify that padding bytes are zero.
            fidl::decode!(
                fidl::encoding::Endpoint<fidl::endpoints::ClientEnd<WatcherMarker>>,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                &mut self.watcher,
                decoder,
                offset + 0,
                _depth
            )?;
            Ok(())
        }
    }
}