1// WARNING: This file is machine generated by fidlgen.
23#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
56use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_ui_observation_geometry__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
/// Zero-sized marker type identifying the
/// `fuchsia.ui.observation.geometry/ViewTreeWatcher` protocol at the type
/// level; carries no runtime state.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ViewTreeWatcherMarker;
// Associates the marker with its generated proxy and request-stream types.
impl fidl::endpoints::ProtocolMarker for ViewTreeWatcherMarker {
    type Proxy = ViewTreeWatcherProxy;
    type RequestStream = ViewTreeWatcherRequestStream;
    // The synchronous proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = ViewTreeWatcherSynchronousProxy;

    // "(anonymous)" prefix presumably means the protocol has no discoverable
    // service name — TODO confirm against the FIDL definition.
    const DEBUG_NAME: &'static str = "(anonymous) ViewTreeWatcher";
}
/// Client-side interface for the `ViewTreeWatcher` protocol; implemented by
/// the generated async proxy and usable as a mock point in tests.
pub trait ViewTreeWatcherProxyInterface: Send + Sync {
    /// Future resolving to the `Watch` response (or a transport error).
    type WatchResponseFut: std::future::Future<Output = Result<WatchResponse, fidl::Error>> + Send;
    /// Issues a `Watch` request; see the protocol docs for hanging-get semantics.
    fn r#watch(&self) -> Self::WatchResponseFut;
}
/// Blocking (synchronous) client for `ViewTreeWatcher`; Fuchsia-only.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct ViewTreeWatcherSynchronousProxy {
    // Underlying synchronous FIDL client owning the channel.
    client: fidl::client::sync::Client,
}
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for ViewTreeWatcherSynchronousProxy {
    type Proxy = ViewTreeWatcherProxy;
    type Protocol = ViewTreeWatcherMarker;

    // Construction and channel accessors all delegate to the inner client.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
#[cfg(target_os = "fuchsia")]
impl ViewTreeWatcherSynchronousProxy {
    /// Wraps `channel` in a synchronous proxy, tagging the client with the
    /// protocol's debug name for diagnostics.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Consumes the proxy, returning the underlying channel.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<ViewTreeWatcherEvent, fidl::Error> {
        ViewTreeWatcherEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// A method of obtaining view tree snapshots for a particular view.
    ///
    /// This call is formulated as a "hanging get" pattern: the client asks for
    /// a set of recent snapshots, and receives them via the callback. This
    /// pull-based approach ensures that clients consume events at their own
    /// pace; events don't clog up the channel in an unbounded manner.
    ///
    /// Error Handling. If Error is unset, the client may assume that
    /// the response contains updates with complete information over its epoch.
    ///
    /// Flow control. The caller is allowed at most one in-flight |Watch| call
    /// at a time; it is a logical error to have concurrent calls to |Watch|.
    /// Non-compliance results in channel closure.
    ///
    /// Client pacing. The server will dispatch snapshots to the caller on a
    /// lossless, best-effort basis, but the caller must allocate enough time to
    /// keep up with new snapshots.
    pub fn r#watch(&self, ___deadline: zx::MonotonicInstant) -> Result<WatchResponse, fidl::Error> {
        // Blocks until the server replies or `___deadline` passes.
        // 0x3c7670983418477b is the wire ordinal for `Watch`.
        let _response = self.client.send_query::<fidl::encoding::EmptyPayload, WatchResponse>(
            (),
            0x3c7670983418477b,
            fidl::encoding::DynamicFlags::empty(),
            ___deadline,
        )?;
        Ok(_response)
    }
}
// Allows recovering the raw handle from a synchronous proxy.
#[cfg(target_os = "fuchsia")]
impl From<ViewTreeWatcherSynchronousProxy> for zx::Handle {
    fn from(value: ViewTreeWatcherSynchronousProxy) -> Self {
        value.into_channel().into()
    }
}
// Convenience conversion: any channel can be wrapped directly.
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for ViewTreeWatcherSynchronousProxy {
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}
// Builds a synchronous proxy from a typed client endpoint.
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::FromClient for ViewTreeWatcherSynchronousProxy {
    type Protocol = ViewTreeWatcherMarker;

    fn from_client(value: fidl::endpoints::ClientEnd<ViewTreeWatcherMarker>) -> Self {
        Self::new(value.into_channel())
    }
}
/// Asynchronous client for `ViewTreeWatcher`. Cloning shares the underlying
/// channel client.
#[derive(Debug, Clone)]
pub struct ViewTreeWatcherProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
impl fidl::endpoints::Proxy for ViewTreeWatcherProxy {
    type Protocol = ViewTreeWatcherMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    // Fails (returning `self` back) if other clones of the client still exist.
    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
impl ViewTreeWatcherProxy {
    /// Create a new Proxy for fuchsia.ui.observation.geometry/ViewTreeWatcher.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> ViewTreeWatcherEventStream {
        ViewTreeWatcherEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// A method of obtaining view tree snapshots for a particular view.
    ///
    /// This call is formulated as a "hanging get" pattern: the client asks for
    /// a set of recent snapshots, and receives them via the callback. This
    /// pull-based approach ensures that clients consume events at their own
    /// pace; events don't clog up the channel in an unbounded manner.
    ///
    /// Error Handling. If Error is unset, the client may assume that
    /// the response contains updates with complete information over its epoch.
    ///
    /// Flow control. The caller is allowed at most one in-flight |Watch| call
    /// at a time; it is a logical error to have concurrent calls to |Watch|.
    /// Non-compliance results in channel closure.
    ///
    /// Client pacing. The server will dispatch snapshots to the caller on a
    /// lossless, best-effort basis, but the caller must allocate enough time to
    /// keep up with new snapshots.
    pub fn r#watch(
        &self,
    ) -> fidl::client::QueryResponseFut<WatchResponse, fidl::encoding::DefaultFuchsiaResourceDialect>
    {
        // Delegates to the trait implementation below.
        ViewTreeWatcherProxyInterface::r#watch(self)
    }
}
impl ViewTreeWatcherProxyInterface for ViewTreeWatcherProxy {
    type WatchResponseFut = fidl::client::QueryResponseFut<
        WatchResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#watch(&self) -> Self::WatchResponseFut {
        // Decoder invoked by the response future once the reply arrives;
        // the const ordinal parameter must match the one sent below.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<WatchResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                WatchResponse,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3c7670983418477b,
            >(_buf?)?;
            Ok(_response)
        }
        // Sends the (empty-payload) request and returns a future that decodes
        // the response with `_decode`.
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, WatchResponse>(
            (),
            0x3c7670983418477b,
            fidl::encoding::DynamicFlags::empty(),
            _decode,
        )
    }
}
/// Stream of events arriving from the remote end of a `ViewTreeWatcher`
/// channel; obtained via `ViewTreeWatcherProxy::take_event_stream`.
pub struct ViewTreeWatcherEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
// Explicitly `Unpin`: the stream is polled through `Pin<&mut Self>` but keeps
// no self-referential state.
impl std::marker::Unpin for ViewTreeWatcherEventStream {}
impl futures::stream::FusedStream for ViewTreeWatcherEventStream {
    // Terminated exactly when the underlying event receiver is.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
impl futures::Stream for ViewTreeWatcherEventStream {
    type Item = Result<ViewTreeWatcherEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Poll the raw receiver; `?` surfaces transport errors as an item,
        // and `ready!` propagates `Pending`.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            // A raw message buffer arrived: decode it into a typed event.
            Some(buf) => std::task::Poll::Ready(Some(ViewTreeWatcherEvent::decode(buf))),
            // Receiver exhausted: the stream is finished.
            None => std::task::Poll::Ready(None),
        }
    }
}
/// Events sent by the server. This protocol declares no events, so the enum
/// is uninhabited; `decode` below always reports an unknown ordinal.
#[derive(Debug)]
pub enum ViewTreeWatcherEvent {}
243244impl ViewTreeWatcherEvent {
245/// Decodes a message buffer as a [`ViewTreeWatcherEvent`].
246fn decode(
247mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
248 ) -> Result<ViewTreeWatcherEvent, fidl::Error> {
249let (bytes, _handles) = buf.split_mut();
250let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
251debug_assert_eq!(tx_header.tx_id, 0);
252match tx_header.ordinal {
253_ => Err(fidl::Error::UnknownOrdinal {
254 ordinal: tx_header.ordinal,
255 protocol_name:
256 <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
257 }),
258 }
259 }
260}
/// A Stream of incoming requests for fuchsia.ui.observation.geometry/ViewTreeWatcher.
pub struct ViewTreeWatcherRequestStream {
    // Shared server-side channel state; also cloned into each control handle
    // handed out with a request.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the peer closes or shutdown is observed; polling afterwards
    // panics (see the Stream impl).
    is_terminated: bool,
}
// Explicitly `Unpin`: the request stream holds no self-referential state.
impl std::marker::Unpin for ViewTreeWatcherRequestStream {}
impl futures::stream::FusedStream for ViewTreeWatcherRequestStream {
    // Reports the terminal flag maintained by `poll_next`.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
impl fidl::endpoints::RequestStream for ViewTreeWatcherRequestStream {
    type Protocol = ViewTreeWatcherMarker;
    type ControlHandle = ViewTreeWatcherControlHandle;

    // Wraps a server endpoint channel in a fresh (non-terminated) stream.
    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    // Hands out a control handle sharing this stream's channel state.
    fn control_handle(&self) -> Self::ControlHandle {
        ViewTreeWatcherControlHandle { inner: self.inner.clone() }
    }

    // The inner/terminated pair round-trips through into_inner/from_inner.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
impl futures::Stream for ViewTreeWatcherRequestStream {
    type Item = Result<ViewTreeWatcherRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // Shutdown requested via a control handle: finish the stream.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        // Polling a finished fused stream is a caller bug.
        if this.is_terminated {
            panic!("polled ViewTreeWatcherRequestStream after completion");
        }
        // Decode using thread-local scratch buffers to avoid per-poll allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure ends the stream cleanly rather than erroring.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                std::task::Poll::Ready(Some(match header.ordinal {
                    // Ordinal for the two-way `Watch` method.
                    0x3c7670983418477b => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        // `Watch` has an empty request payload; decode it anyway
                        // to validate the message body.
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            ViewTreeWatcherControlHandle { inner: this.inner.clone() };
                        // The responder carries the tx_id so the reply can be
                        // matched to this request.
                        Ok(ViewTreeWatcherRequest::Watch {
                            responder: ViewTreeWatcherWatchResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
/// A method of obtaining view tree snapshots for a particular view, the "context
/// view", and its child views, if any. The returned data is a sequence of
/// snapshots during the period of observation, which starts at the client's
/// prior Watch() call's [`epoch_end`] (or zx.Time 0), and ending at the
/// current [`epoch_end`]. The timebase is ZX_CLOCK_MONOTONIC.
///
/// Clients typically obtain a `ViewTreeWatcher` capability from within a test,
/// and it is not generally possible to obtain outside of a test environment.
/// For more information see `fuchsia.ui.observation.test.Registry` and
/// `fuchsia.ui.test.scene.Controller`.
///
/// Usage note. With this protocol, a client can watch for changes to the view
/// tree over which it has authority. For example, if a client owns view A, then
/// A serves as the context view for A's subtree (i.e., a "root view"), where A
/// is a parent of view B, and B is a parent of view C. The client can then
/// observe key lifecycle events in all of A, B, and C, such as newly connected
/// views, changes to view position and size, etc. In doing so, a client can
/// gate its actions on changes to the view tree, in a reliable and ergonomic
/// manner. For example, a client can wait for a descendant view C to become
/// connected before requesting a focus transfer to C.
///
/// Configuration: The context view is determined outside of this protocol.
///
/// Frequency: A client can receive one or more snapshots per frame. Clients
/// should not "count snapshots", as the per-frame snapshot count can be
/// non-deterministic. Instead, clients should look for specific conditions on
/// the snapshot state.
///
/// Issuance: If the context view is disconnected from a display, no
/// frames are issued on behalf of the context view, and a Watch() call will
/// sit quietly.
///
/// Lifecycle: The server endpoint is closed when the context view dies.
#[derive(Debug)]
pub enum ViewTreeWatcherRequest {
    /// A method of obtaining view tree snapshots for a particular view.
    ///
    /// This call is formulated as a "hanging get" pattern: the client asks for
    /// a set of recent snapshots, and receives them via the callback. This
    /// pull-based approach ensures that clients consume events at their own
    /// pace; events don't clog up the channel in an unbounded manner.
    ///
    /// Error Handling. If Error is unset, the client may assume that
    /// the response contains updates with complete information over its epoch.
    ///
    /// Flow control. The caller is allowed at most one in-flight |Watch| call
    /// at a time; it is a logical error to have concurrent calls to |Watch|.
    /// Non-compliance results in channel closure.
    ///
    /// Client pacing. The server will dispatch snapshots to the caller on a
    /// lossless, best-effort basis, but the caller must allocate enough time to
    /// keep up with new snapshots.
    Watch { responder: ViewTreeWatcherWatchResponder },
}
419420impl ViewTreeWatcherRequest {
421#[allow(irrefutable_let_patterns)]
422pub fn into_watch(self) -> Option<(ViewTreeWatcherWatchResponder)> {
423if let ViewTreeWatcherRequest::Watch { responder } = self {
424Some((responder))
425 } else {
426None
427}
428 }
429430/// Name of the method defined in FIDL
431pub fn method_name(&self) -> &'static str {
432match *self {
433 ViewTreeWatcherRequest::Watch { .. } => "watch",
434 }
435 }
436}
/// Handle for controlling the server endpoint of a `ViewTreeWatcher`
/// connection (shutdown, epitaphs, peer signals) without owning the stream.
#[derive(Debug, Clone)]
pub struct ViewTreeWatcherControlHandle {
    // Same shared state as the request stream it was created from.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
impl fidl::endpoints::ControlHandle for ViewTreeWatcherControlHandle {
    // All operations delegate to the shared `ServeInner` / its channel.
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        // `Peered` provides `signal_peer` on the channel; imported locally to
        // keep the trait out of the module namespace.
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
// Intentionally empty: this is where per-event `send_…` methods would be
// generated, and this protocol declares no events.
impl ViewTreeWatcherControlHandle {}
/// Responder used by the server to answer a single `Watch` request.
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct ViewTreeWatcherWatchResponder {
    // Wrapped in ManuallyDrop so `drop_without_shutdown`/`send` can release it
    // exactly once while bypassing the Drop impl via `mem::forget`.
    control_handle: std::mem::ManuallyDrop<ViewTreeWatcherControlHandle>,
    // Transaction id of the originating request; echoed in the reply.
    tx_id: u32,
}
/// Sets the channel to be shutdown (see [`ViewTreeWatcherControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for ViewTreeWatcherWatchResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
impl fidl::endpoints::Responder for ViewTreeWatcherWatchResponder {
    type ControlHandle = ViewTreeWatcherControlHandle;

    fn control_handle(&self) -> &ViewTreeWatcherControlHandle {
        &self.control_handle
    }

    /// Consumes the responder without triggering the Drop-time shutdown.
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
impl ViewTreeWatcherWatchResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: &WatchResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Consume `self` without re-triggering shutdown in Drop.
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut payload: &WatchResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply, pairing it with the request's tx_id and
    // the `Watch` method ordinal.
    fn send_raw(&self, mut payload: &WatchResponse) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<WatchResponse>(
            payload,
            self.tx_id,
            0x3c7670983418477b,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
// Generated support module for encode/decode helper types; empty here aside
// from the glob import — presumably because this protocol introduces no types
// that need them.
mod internal {
    use super::*;
}