1// WARNING: This file is machine generated by fidlgen.
23#![warn(clippy::all)]
4#![allow(unused_parens, unused_mut, unused_imports, nonstandard_style)]
56use bitflags::bitflags;
7use fidl::client::QueryResponseFut;
8use fidl::encoding::{MessageBufFor, ProxyChannelBox, ResourceDialect};
9use fidl::endpoints::{ControlHandle as _, Responder as _};
10pub use fidl_fuchsia_ui_observation_geometry__common::*;
11use futures::future::{self, MaybeDone, TryFutureExt};
12use zx_status;
/// Zero-sized marker type identifying the
/// `fuchsia.ui.observation.geometry/ViewTreeWatcher` protocol; it carries no
/// data and only selects the associated proxy/stream types via trait impls.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ViewTreeWatcherMarker;
impl fidl::endpoints::ProtocolMarker for ViewTreeWatcherMarker {
    type Proxy = ViewTreeWatcherProxy;
    type RequestStream = ViewTreeWatcherRequestStream;
    // The blocking proxy only exists on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    type SynchronousProxy = ViewTreeWatcherSynchronousProxy;

    // Name used when constructing clients and when reporting unknown-ordinal
    // errors elsewhere in this file.
    const DEBUG_NAME: &'static str = "(anonymous) ViewTreeWatcher";
}
/// Client-side interface for `ViewTreeWatcher`: a single two-way `Watch`
/// method whose reply future resolves to a [`WatchResponse`].
pub trait ViewTreeWatcherProxyInterface: Send + Sync {
    type WatchResponseFut: std::future::Future<Output = Result<WatchResponse, fidl::Error>> + Send;
    fn r#watch(&self) -> Self::WatchResponseFut;
}
/// Synchronous (blocking) client for `ViewTreeWatcher`; available only on
/// Fuchsia targets.
#[derive(Debug)]
#[cfg(target_os = "fuchsia")]
pub struct ViewTreeWatcherSynchronousProxy {
    client: fidl::client::sync::Client,
}
#[cfg(target_os = "fuchsia")]
impl fidl::endpoints::SynchronousProxy for ViewTreeWatcherSynchronousProxy {
    type Proxy = ViewTreeWatcherProxy;
    type Protocol = ViewTreeWatcherMarker;

    // Construction and channel accessors all delegate to the inherent impl /
    // inner sync client.
    fn from_channel(inner: fidl::Channel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    fn as_channel(&self) -> &fidl::Channel {
        self.client.as_channel()
    }
}
#[cfg(target_os = "fuchsia")]
impl ViewTreeWatcherSynchronousProxy {
    /// Creates a synchronous proxy over `channel`, tagging the underlying
    /// client with the protocol's `DEBUG_NAME` for error reporting.
    pub fn new(channel: fidl::Channel) -> Self {
        let protocol_name = <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::sync::Client::new(channel, protocol_name) }
    }

    /// Recovers the underlying channel, consuming the proxy.
    pub fn into_channel(self) -> fidl::Channel {
        self.client.into_channel()
    }

    /// Waits until an event arrives and returns it. It is safe for other
    /// threads to make concurrent requests while waiting for an event.
    pub fn wait_for_event(
        &self,
        deadline: zx::MonotonicInstant,
    ) -> Result<ViewTreeWatcherEvent, fidl::Error> {
        ViewTreeWatcherEvent::decode(self.client.wait_for_event(deadline)?)
    }

    /// A method of obtaining view tree snapshots for a particular view.
    ///
    /// This call is formulated as a "hanging get" pattern: the client asks for
    /// a set of recent snapshots, and receives them via the callback. This
    /// pull-based approach ensures that clients consume events at their own
    /// pace; events don't clog up the channel in an unbounded manner.
    ///
    /// Error Handling. If Error is unset, the client may assume that
    /// the response contains updates with complete information over its epoch.
    ///
    /// Flow control. The caller is allowed at most one in-flight |Watch| call
    /// at a time; it is a logical error to have concurrent calls to |Watch|.
    /// Non-compliance results in channel closure.
    ///
    /// Client pacing. The server will dispatch snapshots to the caller on a
    /// lossless, best-effort basis, but the caller must allocate enough time to
    /// keep up with new snapshots.
    pub fn r#watch(&self, ___deadline: zx::MonotonicInstant) -> Result<WatchResponse, fidl::Error> {
        // Blocking two-way call; 0x3c7670983418477b is the `Watch` method
        // ordinal, matched by the server dispatch below.
        let _response = self.client.send_query::<fidl::encoding::EmptyPayload, WatchResponse>(
            (),
            0x3c7670983418477b,
            fidl::encoding::DynamicFlags::empty(),
            ___deadline,
        )?;
        Ok(_response)
    }
}
#[cfg(target_os = "fuchsia")]
impl From<ViewTreeWatcherSynchronousProxy> for zx::Handle {
    fn from(value: ViewTreeWatcherSynchronousProxy) -> Self {
        // Unwrap the proxy to its channel, then convert the channel into a
        // generic handle.
        value.into_channel().into()
    }
}
#[cfg(target_os = "fuchsia")]
impl From<fidl::Channel> for ViewTreeWatcherSynchronousProxy {
    // Convenience conversion; equivalent to calling `new` directly.
    fn from(value: fidl::Channel) -> Self {
        Self::new(value)
    }
}
/// Asynchronous client for `ViewTreeWatcher`. Cloning shares the underlying
/// channel client.
#[derive(Debug, Clone)]
pub struct ViewTreeWatcherProxy {
    client: fidl::client::Client<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
impl fidl::endpoints::Proxy for ViewTreeWatcherProxy {
    type Protocol = ViewTreeWatcherMarker;

    fn from_channel(inner: ::fidl::AsyncChannel) -> Self {
        Self::new(inner)
    }

    fn into_channel(self) -> Result<::fidl::AsyncChannel, Self> {
        // On failure the inner client is handed back wrapped in `Self`, so the
        // proxy remains usable.
        self.client.into_channel().map_err(|client| Self { client })
    }

    fn as_channel(&self) -> &::fidl::AsyncChannel {
        self.client.as_channel()
    }
}
impl ViewTreeWatcherProxy {
    /// Create a new Proxy for fuchsia.ui.observation.geometry/ViewTreeWatcher.
    pub fn new(channel: ::fidl::AsyncChannel) -> Self {
        let protocol_name = <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME;
        Self { client: fidl::client::Client::new(channel, protocol_name) }
    }

    /// Get a Stream of events from the remote end of the protocol.
    ///
    /// # Panics
    ///
    /// Panics if the event stream was already taken.
    pub fn take_event_stream(&self) -> ViewTreeWatcherEventStream {
        ViewTreeWatcherEventStream { event_receiver: self.client.take_event_receiver() }
    }

    /// A method of obtaining view tree snapshots for a particular view.
    ///
    /// This call is formulated as a "hanging get" pattern: the client asks for
    /// a set of recent snapshots, and receives them via the callback. This
    /// pull-based approach ensures that clients consume events at their own
    /// pace; events don't clog up the channel in an unbounded manner.
    ///
    /// Error Handling. If Error is unset, the client may assume that
    /// the response contains updates with complete information over its epoch.
    ///
    /// Flow control. The caller is allowed at most one in-flight |Watch| call
    /// at a time; it is a logical error to have concurrent calls to |Watch|.
    /// Non-compliance results in channel closure.
    ///
    /// Client pacing. The server will dispatch snapshots to the caller on a
    /// lossless, best-effort basis, but the caller must allocate enough time to
    /// keep up with new snapshots.
    pub fn r#watch(
        &self,
    ) -> fidl::client::QueryResponseFut<WatchResponse, fidl::encoding::DefaultFuchsiaResourceDialect>
    {
        // Delegates to the `ViewTreeWatcherProxyInterface` implementation.
        ViewTreeWatcherProxyInterface::r#watch(self)
    }
}
impl ViewTreeWatcherProxyInterface for ViewTreeWatcherProxy {
    type WatchResponseFut = fidl::client::QueryResponseFut<
        WatchResponse,
        fidl::encoding::DefaultFuchsiaResourceDialect,
    >;
    fn r#watch(&self) -> Self::WatchResponseFut {
        // Decodes the reply to a `Watch` call; invoked once the response
        // message arrives on the channel.
        fn _decode(
            mut _buf: Result<<fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc, fidl::Error>,
        ) -> Result<WatchResponse, fidl::Error> {
            let _response = fidl::client::decode_transaction_body::<
                WatchResponse,
                fidl::encoding::DefaultFuchsiaResourceDialect,
                0x3c7670983418477b,
            >(_buf?)?;
            Ok(_response)
        }
        // Sends the empty request payload and pairs it with `_decode` for the
        // eventual reply (same ordinal as the synchronous path).
        self.client.send_query_and_decode::<fidl::encoding::EmptyPayload, WatchResponse>(
            (),
            0x3c7670983418477b,
            fidl::encoding::DynamicFlags::empty(),
            _decode,
        )
    }
}
/// Stream of incoming events for `ViewTreeWatcher`, produced by
/// `ViewTreeWatcherProxy::take_event_stream`.
pub struct ViewTreeWatcherEventStream {
    event_receiver: fidl::client::EventReceiver<fidl::encoding::DefaultFuchsiaResourceDialect>,
}
// Explicitly assert the stream can be moved after being pinned.
impl std::marker::Unpin for ViewTreeWatcherEventStream {}

impl futures::stream::FusedStream for ViewTreeWatcherEventStream {
    // Termination tracking is delegated to the underlying event receiver.
    fn is_terminated(&self) -> bool {
        self.event_receiver.is_terminated()
    }
}
impl futures::Stream for ViewTreeWatcherEventStream {
    type Item = Result<ViewTreeWatcherEvent, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // Poll the inner receiver; the trailing `?` turns a transport error
        // into an early `Ready(Some(Err(..)))` return.
        match futures::ready!(futures::stream::StreamExt::poll_next_unpin(
            &mut self.event_receiver,
            cx
        )?) {
            // A message buffer arrived: decode it into a protocol event.
            Some(buf) => std::task::Poll::Ready(Some(ViewTreeWatcherEvent::decode(buf))),
            // Receiver exhausted: the event stream is finished.
            None => std::task::Poll::Ready(None),
        }
    }
}
/// Events for `ViewTreeWatcher`. The protocol declares no events, so this
/// enum has no variants and can never be constructed.
#[derive(Debug)]
pub enum ViewTreeWatcherEvent {}
234235impl ViewTreeWatcherEvent {
236/// Decodes a message buffer as a [`ViewTreeWatcherEvent`].
237fn decode(
238mut buf: <fidl::encoding::DefaultFuchsiaResourceDialect as fidl::encoding::ResourceDialect>::MessageBufEtc,
239 ) -> Result<ViewTreeWatcherEvent, fidl::Error> {
240let (bytes, _handles) = buf.split_mut();
241let (tx_header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;
242debug_assert_eq!(tx_header.tx_id, 0);
243match tx_header.ordinal {
244_ => Err(fidl::Error::UnknownOrdinal {
245 ordinal: tx_header.ordinal,
246 protocol_name:
247 <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
248 }),
249 }
250 }
251}
/// A Stream of incoming requests for fuchsia.ui.observation.geometry/ViewTreeWatcher.
pub struct ViewTreeWatcherRequestStream {
    // Shared server-side state (channel + shutdown bookkeeping); also cloned
    // into control handles and responders.
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
    // Set once the channel closes or is shut down; polling afterwards panics.
    is_terminated: bool,
}
// Explicitly assert the stream can be moved after being pinned.
impl std::marker::Unpin for ViewTreeWatcherRequestStream {}

impl futures::stream::FusedStream for ViewTreeWatcherRequestStream {
    // Terminated state is tracked directly on the stream struct.
    fn is_terminated(&self) -> bool {
        self.is_terminated
    }
}
impl fidl::endpoints::RequestStream for ViewTreeWatcherRequestStream {
    type Protocol = ViewTreeWatcherMarker;
    type ControlHandle = ViewTreeWatcherControlHandle;

    fn from_channel(channel: ::fidl::AsyncChannel) -> Self {
        Self { inner: std::sync::Arc::new(fidl::ServeInner::new(channel)), is_terminated: false }
    }

    fn control_handle(&self) -> Self::ControlHandle {
        // Shares `inner`, so the handle stays valid independently of the
        // stream it came from.
        ViewTreeWatcherControlHandle { inner: self.inner.clone() }
    }

    // `into_inner` / `from_inner` allow the serving machinery to take the
    // stream apart and reassemble it without touching the channel.
    fn into_inner(
        self,
    ) -> (::std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>, bool)
    {
        (self.inner, self.is_terminated)
    }

    fn from_inner(
        inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
        is_terminated: bool,
    ) -> Self {
        Self { inner, is_terminated }
    }
}
impl futures::Stream for ViewTreeWatcherRequestStream {
    type Item = Result<ViewTreeWatcherRequest, fidl::Error>;

    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = &mut *self;
        // A requested shutdown ends the stream cleanly.
        if this.inner.check_shutdown(cx) {
            this.is_terminated = true;
            return std::task::Poll::Ready(None);
        }
        if this.is_terminated {
            panic!("polled ViewTreeWatcherRequestStream after completion");
        }
        // Borrow thread-local decode buffers to avoid per-message allocation.
        fidl::encoding::with_tls_decode_buf::<_, fidl::encoding::DefaultFuchsiaResourceDialect>(
            |bytes, handles| {
                match this.inner.channel().read_etc(cx, bytes, handles) {
                    std::task::Poll::Ready(Ok(())) => {}
                    std::task::Poll::Pending => return std::task::Poll::Pending,
                    // Peer closure is normal end-of-stream, not an error item.
                    std::task::Poll::Ready(Err(zx_status::Status::PEER_CLOSED)) => {
                        this.is_terminated = true;
                        return std::task::Poll::Ready(None);
                    }
                    std::task::Poll::Ready(Err(e)) => {
                        return std::task::Poll::Ready(Some(Err(fidl::Error::ServerRequestRead(
                            e.into(),
                        ))))
                    }
                }

                // A message has been received from the channel
                let (header, _body_bytes) = fidl::encoding::decode_transaction_header(bytes)?;

                // Dispatch by method ordinal; `Watch` is the only method.
                std::task::Poll::Ready(Some(match header.ordinal {
                    0x3c7670983418477b => {
                        header.validate_request_tx_id(fidl::MethodType::TwoWay)?;
                        let mut req = fidl::new_empty!(
                            fidl::encoding::EmptyPayload,
                            fidl::encoding::DefaultFuchsiaResourceDialect
                        );
                        fidl::encoding::Decoder::<fidl::encoding::DefaultFuchsiaResourceDialect>::decode_into::<fidl::encoding::EmptyPayload>(&header, _body_bytes, handles, &mut req)?;
                        let control_handle =
                            ViewTreeWatcherControlHandle { inner: this.inner.clone() };
                        // Hand the caller a responder carrying the tx id so the
                        // reply can be correlated with this request.
                        Ok(ViewTreeWatcherRequest::Watch {
                            responder: ViewTreeWatcherWatchResponder {
                                control_handle: std::mem::ManuallyDrop::new(control_handle),
                                tx_id: header.tx_id,
                            },
                        })
                    }
                    _ => Err(fidl::Error::UnknownOrdinal {
                        ordinal: header.ordinal,
                        protocol_name:
                            <ViewTreeWatcherMarker as fidl::endpoints::ProtocolMarker>::DEBUG_NAME,
                    }),
                }))
            },
        )
    }
}
/// A method of obtaining view tree snapshots for a particular view, the "context
/// view", and its child views, if any. The returned data is a sequence of
/// snapshots during the period of observation, which starts at the client's
/// prior Watch() call's [`epoch_end`] (or zx.Time 0), and ending at the
/// current [`epoch_end`]. The timebase is ZX_CLOCK_MONOTONIC.
///
/// Clients typically obtain a `ViewTreeWatcher` capability from within a test,
/// and it is not generally possible to obtain outside of a test environment.
/// For more information see `fuchsia.ui.observation.test.Registry` and
/// `fuchsia.ui.test.scene.Controller`.
///
/// Usage note. With this protocol, a client can watch for changes to the view
/// tree over which it has authority. For example, if a client owns view A, then
/// A serves as the context view for A's subtree (i.e., a "root view"), where A
/// is a parent of view B, and B is a parent of view C. The client can then
/// observe key lifecycle events in all of A, B, and C, such as newly connected
/// views, changes to view position and size, etc. In doing so, a client can
/// gate its actions on changes to the view tree, in a reliable and ergonomic
/// manner. For example, a client can wait for a descendant view C to become
/// connected before requesting a focus transfer to C.
///
/// Configuration: The context view is determined outside of this protocol.
///
/// Frequency: A client can receive one or more snapshots per frame. Clients
/// should not "count snapshots", as the per-frame snapshot count can be
/// non-deterministic. Instead, clients should look for specific conditions on
/// the snapshot state.
///
/// Issuance: If the context view is disconnected from a display, no
/// frames are issued on behalf of the context view, and a Watch() call will
/// sit quietly.
///
/// Lifecycle: The server endpoint is closed when the context view dies.
#[derive(Debug)]
pub enum ViewTreeWatcherRequest {
    /// A method of obtaining view tree snapshots for a particular view.
    ///
    /// This call is formulated as a "hanging get" pattern: the client asks for
    /// a set of recent snapshots, and receives them via the callback. This
    /// pull-based approach ensures that clients consume events at their own
    /// pace; events don't clog up the channel in an unbounded manner.
    ///
    /// Error Handling. If Error is unset, the client may assume that
    /// the response contains updates with complete information over its epoch.
    ///
    /// Flow control. The caller is allowed at most one in-flight |Watch| call
    /// at a time; it is a logical error to have concurrent calls to |Watch|.
    /// Non-compliance results in channel closure.
    ///
    /// Client pacing. The server will dispatch snapshots to the caller on a
    /// lossless, best-effort basis, but the caller must allocate enough time to
    /// keep up with new snapshots.
    Watch { responder: ViewTreeWatcherWatchResponder },
}
410411impl ViewTreeWatcherRequest {
412#[allow(irrefutable_let_patterns)]
413pub fn into_watch(self) -> Option<(ViewTreeWatcherWatchResponder)> {
414if let ViewTreeWatcherRequest::Watch { responder } = self {
415Some((responder))
416 } else {
417None
418}
419 }
420421/// Name of the method defined in FIDL
422pub fn method_name(&self) -> &'static str {
423match *self {
424 ViewTreeWatcherRequest::Watch { .. } => "watch",
425 }
426 }
427}
/// Server-side control handle for `ViewTreeWatcher`; shares the serving state
/// with the request stream and can shut the channel down independently.
#[derive(Debug, Clone)]
pub struct ViewTreeWatcherControlHandle {
    inner: std::sync::Arc<fidl::ServeInner<fidl::encoding::DefaultFuchsiaResourceDialect>>,
}
impl fidl::endpoints::ControlHandle for ViewTreeWatcherControlHandle {
    fn shutdown(&self) {
        self.inner.shutdown()
    }
    // Shuts down the channel, sending `status` as the epitaph.
    fn shutdown_with_epitaph(&self, status: zx_status::Status) {
        self.inner.shutdown_with_epitaph(status)
    }

    fn is_closed(&self) -> bool {
        self.inner.channel().is_closed()
    }
    fn on_closed(&self) -> fidl::OnSignalsRef<'_> {
        self.inner.channel().on_closed()
    }

    // Raw zircon peer signaling is only available on Fuchsia targets.
    #[cfg(target_os = "fuchsia")]
    fn signal_peer(
        &self,
        clear_mask: zx::Signals,
        set_mask: zx::Signals,
    ) -> Result<(), zx_status::Status> {
        use fidl::Peered;
        self.inner.channel().signal_peer(clear_mask, set_mask)
    }
}
459460impl ViewTreeWatcherControlHandle {}
#[must_use = "FIDL methods require a response to be sent"]
#[derive(Debug)]
pub struct ViewTreeWatcherWatchResponder {
    // ManuallyDrop lets `send`/`drop_without_shutdown` release the handle
    // without triggering this type's shutdown-on-drop behavior.
    control_handle: std::mem::ManuallyDrop<ViewTreeWatcherControlHandle>,
    // Transaction id of the originating `Watch` request, echoed in the reply.
    tx_id: u32,
}
/// Set the channel to be shutdown (see [`ViewTreeWatcherControlHandle::shutdown`])
/// if the responder is dropped without sending a response, so that the client
/// doesn't hang. To prevent this behavior, call `drop_without_shutdown`.
impl std::ops::Drop for ViewTreeWatcherWatchResponder {
    fn drop(&mut self) {
        self.control_handle.shutdown();
        // Safety: drops once, never accessed again
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
    }
}
impl fidl::endpoints::Responder for ViewTreeWatcherWatchResponder {
    type ControlHandle = ViewTreeWatcherControlHandle;

    fn control_handle(&self) -> &ViewTreeWatcherControlHandle {
        &self.control_handle
    }

    // Consumes the responder without shutting down the channel (bypasses the
    // Drop impl above).
    fn drop_without_shutdown(mut self) {
        // Safety: drops once, never accessed again due to mem::forget
        unsafe { std::mem::ManuallyDrop::drop(&mut self.control_handle) };
        // Prevent Drop from running (which would shut down the channel)
        std::mem::forget(self);
    }
}
impl ViewTreeWatcherWatchResponder {
    /// Sends a response to the FIDL transaction.
    ///
    /// Sets the channel to shutdown if an error occurs.
    pub fn send(self, mut payload: &WatchResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        if _result.is_err() {
            self.control_handle.shutdown();
        }
        // Consume the responder without triggering the shutdown-on-drop path.
        self.drop_without_shutdown();
        _result
    }

    /// Similar to "send" but does not shutdown the channel if an error occurs.
    pub fn send_no_shutdown_on_err(self, mut payload: &WatchResponse) -> Result<(), fidl::Error> {
        let _result = self.send_raw(payload);
        self.drop_without_shutdown();
        _result
    }

    // Encodes and writes the reply using the stored transaction id and the
    // `Watch` method ordinal.
    fn send_raw(&self, mut payload: &WatchResponse) -> Result<(), fidl::Error> {
        self.control_handle.inner.send::<WatchResponse>(
            payload,
            self.tx_id,
            0x3c7670983418477b,
            fidl::encoding::DynamicFlags::empty(),
        )
    }
}
// Module reserved by the bindings for encoding support; currently contains
// only the blanket re-import of the parent scope.
mod internal {
    use super::*;
}