// display_utils/controller.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5use anyhow::Context;
6use display_types::IMAGE_TILING_TYPE_LINEAR;
7
8use fidl::endpoints::ClientEnd;
9use fidl_fuchsia_hardware_display::{
10    self as display, CoordinatorListenerRequest, LayerId as FidlLayerId,
11};
12use fidl_fuchsia_hardware_display_types::{self as display_types};
13use fuchsia_async::{DurationExt as _, TimeoutExt as _};
14use fuchsia_component::client::Service;
15use fuchsia_sync::RwLock;
16use futures::channel::mpsc;
17use futures::{TryFutureExt, TryStreamExt, future};
18use std::fmt;
19use std::sync::Arc;
20use zx::{self as zx, HandleBased};
21
22use crate::INVALID_EVENT_ID;
23use crate::config::{DisplayConfig, LayerConfig};
24use crate::error::{ConfigError, Error, Result};
25use crate::types::{BufferCollectionId, DisplayId, DisplayInfo, Event, EventId, ImageId, LayerId};
26
/// How long to wait for display-coordinator device discovery and for the initial
/// OnDisplaysChanged event before `init`/`init_with_proxy_and_listener_requests` give up.
const TIMEOUT: zx::MonotonicDuration = zx::MonotonicDuration::from_seconds(2);
28
/// Client abstraction for the `fuchsia.hardware.display.Coordinator` protocol. Instances can be
/// safely cloned and passed across threads.
#[derive(Clone)]
pub struct Coordinator {
    // Shared, lock-guarded state. Cloning a `Coordinator` clones the `Arc`, so every clone
    // observes the same displays, vsync listeners, and ID counters.
    inner: Arc<RwLock<CoordinatorInner>>,
}
35
/// Mutable state shared by all clones of a `Coordinator`, guarded by the outer `RwLock`.
struct CoordinatorInner {
    // Displays captured from the initial OnDisplaysChanged event at construction time.
    displays: Vec<DisplayInfo>,
    // FIDL client for fuchsia.hardware.display.Coordinator.
    proxy: display::CoordinatorProxy,
    // Taken (set to `None`) by the first call to `Coordinator::handle_events`; a second
    // call fails with `Error::AlreadyRequested`.
    listener_requests: Option<display::CoordinatorListenerRequestStream>,

    // All subscribed vsync listeners and their optional ID filters.
    vsync_listeners: Vec<(mpsc::UnboundedSender<VsyncEvent>, Option<DisplayId>)>,

    // Simple counter to generate client-assigned integer identifiers.
    id_counter: u64,

    // Generate stamps for `apply_config()`.
    stamp_counter: u64,
}
50
/// A vsync event payload, delivered to channels registered via
/// `Coordinator::add_vsync_listener`.
#[derive(Debug)]
pub struct VsyncEvent {
    /// The ID of the display that generated the vsync event.
    pub id: DisplayId,

    /// The monotonic timestamp of the vsync event.
    pub timestamp: zx::MonotonicInstant,

    /// The stamp of the latest fully applied display configuration.
    pub config: display::ConfigStamp,
}
63
64impl Coordinator {
    /// Establishes a connection to the display-coordinator device and initialize a `Coordinator`
    /// instance with the initial set of available displays. The returned `Coordinator` will
    /// maintain FIDL connection to the underlying device as long as it is alive or the connection
    /// is closed by the peer.
    ///
    /// Returns an error if
    /// - No display-coordinator device is found within `TIMEOUT`.
    /// - An initial OnDisplaysChanged event is not received from the display driver within
    ///   `TIMEOUT` seconds.
    ///
    /// Current limitations:
    ///   - This function connects to the first display-coordinator device that it observes. It
    ///   currently does not support selection of a specific device if multiple display-coordinator
    ///   devices are present.
    // TODO(https://fxbug.dev/42168593): This will currently result in an error if no displays are present on
    // the system (or if one is not attached within `TIMEOUT`). It wouldn't be necessary to rely on
    // a timeout if the display driver sent an event with no displays.
    pub async fn init() -> Result<Coordinator> {
        // Wait (bounded by TIMEOUT) for any instance of the display service to appear.
        let service_proxy = Service::open(display::ServiceMarker)
            .context("failed to open display Service")
            .map_err(Error::DeviceConnectionError)?
            .watch_for_any()
            .map_err(Error::DeviceConnectionError)
            .on_timeout(TIMEOUT.after_now(), || Err(Error::DeviceNotFound))
            .await?;

        let provider_proxy = service_proxy
            .connect_to_provider()
            .context("failed to connect to FIDL provider")
            .map_err(|x| Error::DeviceConnectionError(x.into()))?;

        // Create the Coordinator channel pair plus the listener stream on which the driver
        // will deliver display/vsync events back to us.
        let (coordinator_proxy, coordinator_server_end) =
            fidl::endpoints::create_proxy::<display::CoordinatorMarker>();
        let (coordinator_listener_client_end, coordinator_listener_requests) =
            fidl::endpoints::create_request_stream::<display::CoordinatorListenerMarker>();

        // TODO(https://fxbug.dev/42075865): Consider supporting virtcon client
        // connections.
        let payload = display::ProviderOpenCoordinatorWithListenerForPrimaryRequest {
            coordinator: Some(coordinator_server_end),
            coordinator_listener: Some(coordinator_listener_client_end),
            __source_breaking: fidl::marker::SourceBreaking,
        };
        let () = provider_proxy
            .open_coordinator_with_listener_for_primary(payload)
            .await?
            .map_err(zx::Status::from_raw)?;

        Self::init_with_proxy_and_listener_requests(
            coordinator_proxy,
            coordinator_listener_requests,
        )
        .await
    }
119
    /// Initialize a `Coordinator` instance from pre-established Coordinator and
    /// CoordinatorListener channels.
    ///
    /// Returns an error if
    /// - An initial OnDisplaysChanged event is not received from the display driver within
    ///   `TIMEOUT` seconds.
    // TODO(https://fxbug.dev/42168593): This will currently result in an error if no displays are
    // present on the system (or if one is not attached within `TIMEOUT`). It wouldn't be necessary
    // to rely on a timeout if the display driver sent an event with no displays.
    pub async fn init_with_proxy_and_listener_requests(
        coordinator_proxy: display::CoordinatorProxy,
        mut listener_requests: display::CoordinatorListenerRequestStream,
    ) -> Result<Coordinator> {
        // Block until the driver reports its initial display list, or fail after TIMEOUT.
        let displays = wait_for_initial_displays(&mut listener_requests)
            .on_timeout(TIMEOUT.after_now(), || Err(Error::NoDisplays))
            .await?
            .into_iter()
            .map(DisplayInfo)
            .collect::<Vec<_>>();
        Ok(Coordinator {
            inner: Arc::new(RwLock::new(CoordinatorInner {
                proxy: coordinator_proxy,
                listener_requests: Some(listener_requests),
                displays,
                vsync_listeners: Vec::new(),
                id_counter: 0,
                stamp_counter: 0,
            })),
        })
    }
150
151    /// Returns a copy of the list of displays that are currently known to be present on the system.
152    pub fn displays(&self) -> Vec<DisplayInfo> {
153        self.inner.read().displays.clone()
154    }
155
156    /// Returns a clone of the underlying FIDL client proxy.
157    ///
158    /// Note: This can be helpful to prevent holding the inner RwLock when awaiting a chained FIDL
159    /// call over a proxy.
160    pub fn proxy(&self) -> display::CoordinatorProxy {
161        self.inner.read().proxy.clone()
162    }
163
164    /// Registers a channel to listen to vsync events.
165    pub fn add_vsync_listener(
166        &self,
167        id: Option<DisplayId>,
168    ) -> Result<mpsc::UnboundedReceiver<VsyncEvent>> {
169        // TODO(armansito): Switch to a bounded channel instead.
170        let (sender, receiver) = mpsc::unbounded::<VsyncEvent>();
171        self.inner.write().vsync_listeners.push((sender, id));
172        Ok(receiver)
173    }
174
    /// Returns a Future that represents the FIDL event handling task. Once scheduled on an
    /// executor, this task will continuously handle incoming FIDL events from the display stack
    /// and the returned Future will not terminate until the FIDL channel is closed.
    ///
    /// This task can be scheduled safely on any thread.
    pub async fn handle_events(&self) -> Result<()> {
        let inner = self.inner.clone();
        // The listener stream can be consumed only once; a second call to `handle_events`
        // observes `None` here and fails with `AlreadyRequested`.
        let mut events = inner.write().listener_requests.take().ok_or(Error::AlreadyRequested)?;
        while let Some(msg) = events.try_next().await? {
            match msg {
                CoordinatorListenerRequest::OnDisplaysChanged {
                    added,
                    removed,
                    control_handle: _,
                } => {
                    let removed =
                        removed.into_iter().map(|id| id.into()).collect::<Vec<DisplayId>>();
                    // Only a read lock is needed: `handle_displays_changed` takes `&self`.
                    inner.read().handle_displays_changed(added, removed);
                }
                CoordinatorListenerRequest::OnVsync {
                    display_id,
                    timestamp,
                    applied_config_stamp,
                    cookie,
                    control_handle: _,
                } => {
                    // A write lock is needed so disconnected vsync listeners can be pruned.
                    inner.write().handle_vsync(
                        display_id.into(),
                        timestamp,
                        applied_config_stamp,
                        cookie,
                    )?;
                }
                // Ignore any other listener requests.
                _ => continue,
            }
        }
        Ok(())
    }
213
214    /// Allocates a new virtual hardware layer that is not associated with any display and has no
215    /// configuration.
216    pub async fn create_layer(&self) -> Result<LayerId> {
217        let layer_id = self.inner.write().next_free_layer_id()?;
218        self.proxy().create_layer(&layer_id.into()).await?.map_err(zx::Status::from_raw)?;
219        Ok(layer_id)
220    }
221
222    /// Creates and registers a zircon event with the display driver. The returned event can be
223    /// used as a fence in a display configuration.
224    pub fn create_event(&self) -> Result<Event> {
225        let event = zx::Event::create();
226        let remote = event.duplicate_handle(zx::Rights::SAME_RIGHTS)?;
227        let id = self.inner.write().next_free_event_id()?;
228
229        self.inner.read().proxy.import_event(zx::Event::from(remote), &id.into())?;
230        Ok(Event::new(id, event))
231    }
232
    /// Apply a display configuration. The client is expected to receive a vsync event once the
    /// configuration is successfully applied. Returns an error if the FIDL message cannot be sent.
    ///
    /// On success, returns the client-generated stamp assigned to this configuration; the same
    /// stamp is reported back in subsequent vsync events once the config is fully applied.
    pub async fn apply_config(
        &self,
        configs: &[DisplayConfig],
    ) -> std::result::Result<u64, ConfigError> {
        let proxy = self.proxy();
        for config in configs {
            // Assign the layer list for this display, then configure each layer in order.
            proxy.set_display_layers(
                &config.id.into(),
                &config.layers.iter().map(|l| l.id.into()).collect::<Vec<FidlLayerId>>(),
            )?;
            for layer in &config.layers {
                match &layer.config {
                    LayerConfig::Color { color, display_destination } => {
                        let fidl_color = fidl_fuchsia_hardware_display_types::Color::from(color);
                        proxy.set_layer_color_config(
                            &layer.id.into(),
                            &fidl_color,
                            display_destination,
                        )?;
                    }
                    LayerConfig::Primary { image_id, image_metadata, unblock_event, alpha } => {
                        proxy.set_layer_primary_config(&layer.id.into(), &image_metadata)?;
                        if let Some(alpha_config) = alpha {
                            proxy.set_layer_primary_alpha(
                                &layer.id.into(),
                                alpha_config.mode,
                                alpha_config.val,
                            )?;
                        }
                        // Layers with no unblock fence fall back to INVALID_EVENT_ID.
                        proxy.set_layer_image2(
                            &layer.id.into(),
                            &(*image_id).into(),
                            &unblock_event.unwrap_or(INVALID_EVENT_ID).into(),
                        )?;
                    }
                }
            }
        }

        // Validate the pending configuration with the driver before applying it.
        let result = proxy.check_config().await?;
        if result != display_types::ConfigResult::Ok {
            return Err(ConfigError::invalid(result));
        }

        // NOTE(review): this `unwrap` can only fail after u64::MAX stamps have been issued —
        // practically unreachable, but it would panic rather than surface a ConfigError.
        let config_stamp = self.inner.write().next_config_stamp().unwrap();
        let payload = fidl_fuchsia_hardware_display::CoordinatorApplyConfig3Request {
            stamp: Some(fidl_fuchsia_hardware_display::ConfigStamp { value: config_stamp }),
            ..Default::default()
        };
        match proxy.apply_config3(payload) {
            Ok(()) => Ok(config_stamp),
            Err(err) => Err(ConfigError::from(err)),
        }
    }
289
290    /// Get the config stamp value of the most recent applied config in
291    /// `apply_config`. Returns an error if the FIDL message cannot be sent.
292    pub async fn get_recent_applied_config_stamp(&self) -> std::result::Result<u64, Error> {
293        let proxy = self.proxy();
294        let response = proxy.get_latest_applied_config_stamp().await?;
295        Ok(response.value)
296    }
297
    /// Import a sysmem buffer collection. The returned `BufferCollectionId` can be used in future
    /// API calls to refer to the imported collection.
    ///
    /// Returns an error if the ID space is exhausted or if either driver call fails.
    pub(crate) async fn import_buffer_collection(
        &self,
        token: ClientEnd<fidl_fuchsia_sysmem2::BufferCollectionTokenMarker>,
    ) -> Result<BufferCollectionId> {
        let id = self.inner.write().next_free_collection_id()?;
        let proxy = self.proxy();

        // First import the token.
        proxy.import_buffer_collection(&id.into(), token).await?.map_err(zx::Status::from_raw)?;

        // Tell the driver to assign any device-specific constraints.
        // TODO(https://fxbug.dev/42166207): These fields are effectively unused except for `type` in the case
        // of IMAGE_TYPE_CAPTURE.
        proxy
            .set_buffer_collection_constraints(
                &id.into(),
                &display_types::ImageBufferUsage { tiling_type: IMAGE_TILING_TYPE_LINEAR },
            )
            .await?
            .map_err(zx::Status::from_raw)?;
        Ok(id)
    }
322
323    /// Notify the display driver to release its handle on a previously imported buffer collection.
324    pub(crate) fn release_buffer_collection(&self, id: BufferCollectionId) -> Result<()> {
325        self.inner.read().proxy.release_buffer_collection(&id.into()).map_err(Error::from)
326    }
327
328    /// Register a sysmem buffer collection backed image to the display driver.
329    pub(crate) async fn import_image(
330        &self,
331        collection_id: BufferCollectionId,
332        image_id: ImageId,
333        image_metadata: display_types::ImageMetadata,
334    ) -> Result<()> {
335        self.proxy()
336            .import_image(
337                &image_metadata,
338                &collection_id.into(),
339                0, // buffer_index
340                &image_id.into(),
341            )
342            .await?
343            .map_err(zx::Status::from_raw)?;
344        Ok(())
345    }
346}
347
348// fmt::Debug implementation to allow a `Coordinator` instance to be used with a debug format
349// specifier. We use a custom implementation as not all `Coordinator` members derive fmt::Debug.
350impl fmt::Debug for Coordinator {
351    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
352        f.debug_struct("Coordinator").field("displays", &self.displays()).finish()
353    }
354}
355
356impl CoordinatorInner {
357    fn next_free_collection_id(&mut self) -> Result<BufferCollectionId> {
358        self.id_counter = self.id_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
359        Ok(BufferCollectionId(self.id_counter))
360    }
361
362    fn next_free_event_id(&mut self) -> Result<EventId> {
363        self.id_counter = self.id_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
364        Ok(EventId(self.id_counter))
365    }
366
367    fn next_free_layer_id(&mut self) -> Result<LayerId> {
368        self.id_counter = self.id_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
369        Ok(LayerId(self.id_counter))
370    }
371
372    fn next_config_stamp(&mut self) -> Result<u64> {
373        self.stamp_counter = self.stamp_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
374        Ok(self.stamp_counter)
375    }
376
377    fn handle_displays_changed(&self, _added: Vec<display::Info>, _removed: Vec<DisplayId>) {
378        // TODO(armansito): update the displays list and notify clients. Terminate vsync listeners
379        // that are attached to a removed display.
380    }
381
382    fn handle_vsync(
383        &mut self,
384        display_id: DisplayId,
385        timestamp: zx::MonotonicInstant,
386        applied_config_stamp: display::ConfigStamp,
387        cookie: display::VsyncAckCookie,
388    ) -> Result<()> {
389        if cookie.value != 0 {
390            self.proxy.acknowledge_vsync(cookie.value)?;
391        }
392
393        let mut listeners_to_remove = Vec::new();
394        for (pos, (sender, filter)) in self.vsync_listeners.iter().enumerate() {
395            // Skip the listener if it has a filter that does not match `display_id`.
396            if filter.as_ref().map_or(false, |id| *id != display_id) {
397                continue;
398            }
399            let payload = VsyncEvent { id: display_id, timestamp, config: applied_config_stamp };
400            if let Err(e) = sender.unbounded_send(payload) {
401                if e.is_disconnected() {
402                    listeners_to_remove.push(pos);
403                } else {
404                    return Err(e.into());
405                }
406            }
407        }
408
409        // Clean up disconnected listeners.
410        listeners_to_remove.into_iter().for_each(|pos| {
411            self.vsync_listeners.swap_remove(pos);
412        });
413
414        Ok(())
415    }
416}
417
418// Waits for a single fuchsia.hardware.display.Coordinator.OnDisplaysChanged event and returns the
419// reported displays. By API contract, this event will fire at least once upon initial channel
420// connection if any displays are present. If no displays are present, then the returned Future
421// will not resolve until a display is plugged in.
422async fn wait_for_initial_displays(
423    listener_requests: &mut display::CoordinatorListenerRequestStream,
424) -> Result<Vec<display::Info>> {
425    let mut stream = listener_requests.try_filter_map(|event| match event {
426        CoordinatorListenerRequest::OnDisplaysChanged { added, removed: _, control_handle: _ } => {
427            future::ok(Some(added))
428        }
429        _ => future::ok(None),
430    });
431    stream.try_next().await?.ok_or(Error::NoDisplays)
432}
433
#[cfg(test)]
mod tests {
    use super::{Coordinator, DisplayId, VsyncEvent};
    use anyhow::{Context, Result, format_err};
    use assert_matches::assert_matches;
    use display_mocks::{MockCoordinator, create_proxy_and_mock};
    use fuchsia_async::TestExecutor;
    use futures::task::Poll;
    use futures::{FutureExt, StreamExt, pin_mut, select};
    use {
        fidl_fuchsia_hardware_display as display,
        fidl_fuchsia_hardware_display_types as display_types,
    };

    // Thin wrapper that adds test-failure context to `Coordinator` initialization.
    async fn init_with_proxy_and_listener_requests(
        coordinator_proxy: display::CoordinatorProxy,
        listener_requests: display::CoordinatorListenerRequestStream,
    ) -> Result<Coordinator> {
        Coordinator::init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests)
            .await
            .context("failed to initialize Coordinator")
    }

    // Returns a Coordinator and a connected mock FIDL server. This function sets up the initial
    // "OnDisplaysChanged" event with the given list of `displays`, which `Coordinator` requires
    // before it can resolve its initialization Future.
    async fn init_with_displays(
        displays: &[display::Info],
    ) -> Result<(Coordinator, MockCoordinator)> {
        let (coordinator_proxy, listener_requests, mut mock) = create_proxy_and_mock()?;
        mock.assign_displays(displays.to_vec())?;

        Ok((
            init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests).await?,
            mock,
        ))
    }

    // With no display service available in the test environment, `init` must fail rather
    // than hang (it times out on device discovery).
    #[fuchsia::test]
    async fn test_init_fails_with_no_device_dir() {
        let result = Coordinator::init().await;
        assert_matches!(result, Err(_));
    }

    // An empty initial display list is valid: initialization succeeds and `displays()` is empty.
    #[fuchsia::test]
    async fn test_init_with_no_displays() -> Result<()> {
        let (coordinator_proxy, listener_requests, mut mock) = create_proxy_and_mock()?;
        mock.assign_displays([].to_vec())?;

        let coordinator =
            init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests).await?;
        assert!(coordinator.displays().is_empty());

        Ok(())
    }

    // TODO(https://fxbug.dev/42075852): We should have an automated test verifying that
    // the service provided by driver framework can be opened correctly.

    // The displays reported in the initial OnDisplaysChanged event must be surfaced verbatim
    // (and in order) by `Coordinator::displays()`.
    #[fuchsia::test]
    async fn test_init_with_displays() -> Result<()> {
        let displays = [
            display::Info {
                id: display_types::DisplayId { value: 1 },
                modes: Vec::new(),
                pixel_format: Vec::new(),
                manufacturer_name: "Foo".to_string(),
                monitor_name: "what".to_string(),
                monitor_serial: "".to_string(),
                horizontal_size_mm: 0,
                vertical_size_mm: 0,
                using_fallback_size: false,
            },
            display::Info {
                id: display_types::DisplayId { value: 2 },
                modes: Vec::new(),
                pixel_format: Vec::new(),
                manufacturer_name: "Bar".to_string(),
                monitor_name: "who".to_string(),
                monitor_serial: "".to_string(),
                horizontal_size_mm: 0,
                vertical_size_mm: 0,
                using_fallback_size: false,
            },
        ]
        .to_vec();
        let (coordinator_proxy, listener_requests, mut mock) = create_proxy_and_mock()?;
        mock.assign_displays(displays.clone())?;

        let coordinator =
            init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests).await?;
        assert_eq!(coordinator.displays().len(), 2);
        assert_eq!(coordinator.displays()[0].0, displays[0]);
        assert_eq!(coordinator.displays()[1].0, displays[1]);

        Ok(())
    }

    // A single mock vsync event must reach an unfiltered listener.
    #[test]
    fn test_vsync_listener_single() -> Result<()> {
        // Drive an executor directly for this test to avoid having to rely on timeouts for cases
        // in which no events are received.
        let mut executor = TestExecutor::new();
        let (coordinator, mock) = executor.run_singlethreaded(init_with_displays(&[]))?;
        let mut vsync = coordinator.add_vsync_listener(None)?;

        const ID: DisplayId = DisplayId(1);
        const STAMP: display::ConfigStamp = display::ConfigStamp { value: 1 };
        // Race the listener against the FIDL event loop; the listener should win.
        let event_handlers = async {
            select! {
                event = vsync.next() => event.ok_or_else(|| format_err!("did not receive vsync event")),
                result = coordinator.handle_events().fuse() => {
                    result.context("FIDL event handler failed")?;
                    Err(format_err!("FIDL event handler completed before client vsync event"))
                },
            }
        };
        pin_mut!(event_handlers);

        // Send a single event.
        mock.emit_vsync_event(ID.0, STAMP)?;
        let vsync_event = executor.run_until_stalled(&mut event_handlers);
        assert_matches!(
            vsync_event,
            Poll::Ready(Ok(VsyncEvent { id: ID, timestamp: _, config: STAMP }))
        );

        Ok(())
    }

    // Queued vsync events must be delivered to the listener in emission order.
    #[test]
    fn test_vsync_listener_multiple() -> Result<()> {
        // Drive an executor directly for this test to avoid having to rely on timeouts for cases
        // in which no events are received.
        let mut executor = TestExecutor::new();
        let (coordinator, mock) = executor.run_singlethreaded(init_with_displays(&[]))?;
        let mut vsync = coordinator.add_vsync_listener(None)?;

        let fidl_server = coordinator.handle_events().fuse();
        pin_mut!(fidl_server);

        const ID1: DisplayId = DisplayId(1);
        const ID2: DisplayId = DisplayId(2);
        const STAMP: display::ConfigStamp = display::ConfigStamp { value: 1 };

        // Queue multiple events.
        mock.emit_vsync_event(ID1.0, STAMP)?;
        mock.emit_vsync_event(ID2.0, STAMP)?;
        mock.emit_vsync_event(ID1.0, STAMP)?;

        // Process the FIDL events. The FIDL server Future should not complete as it runs
        // indefinitely.
        let fidl_server_result = executor.run_until_stalled(&mut fidl_server);
        assert_matches!(fidl_server_result, Poll::Pending);

        // Process the vsync listener.
        let vsync_event = executor.run_until_stalled(&mut Box::pin(async { vsync.next().await }));
        assert_matches!(
            vsync_event,
            Poll::Ready(Some(VsyncEvent { id: ID1, timestamp: _, config: STAMP }))
        );

        let vsync_event = executor.run_until_stalled(&mut Box::pin(async { vsync.next().await }));
        assert_matches!(
            vsync_event,
            Poll::Ready(Some(VsyncEvent { id: ID2, timestamp: _, config: STAMP }))
        );

        let vsync_event = executor.run_until_stalled(&mut Box::pin(async { vsync.next().await }));
        assert_matches!(
            vsync_event,
            Poll::Ready(Some(VsyncEvent { id: ID1, timestamp: _, config: STAMP }))
        );

        Ok(())
    }

    // A listener registered with a display-ID filter must only see events for that display.
    #[test]
    fn test_vsync_listener_display_id_filter() -> Result<()> {
        // Drive an executor directly for this test to avoid having to rely on timeouts for cases
        // in which no events are received.
        let mut executor = TestExecutor::new();
        let (coordinator, mock) = executor.run_singlethreaded(init_with_displays(&[]))?;

        const ID1: DisplayId = DisplayId(1);
        const ID2: DisplayId = DisplayId(2);
        const STAMP: display::ConfigStamp = display::ConfigStamp { value: 1 };

        // Listen to events from ID2.
        let mut vsync = coordinator.add_vsync_listener(Some(ID2))?;
        let event_handlers = async {
            select! {
                event = vsync.next() => event.ok_or_else(|| format_err!("did not receive vsync event")),
                result = coordinator.handle_events().fuse() => {
                    result.context("FIDL event handler failed")?;
                    Err(format_err!("FIDL event handler completed before client vsync event"))
                },
            }
        };
        pin_mut!(event_handlers);

        // Event from ID1 should get filtered out and the client should not receive any events.
        mock.emit_vsync_event(ID1.0, STAMP)?;
        let vsync_event = executor.run_until_stalled(&mut event_handlers);
        assert_matches!(vsync_event, Poll::Pending);

        // Event from ID2 should be received.
        mock.emit_vsync_event(ID2.0, STAMP)?;
        let vsync_event = executor.run_until_stalled(&mut event_handlers);
        assert_matches!(
            vsync_event,
            Poll::Ready(Ok(VsyncEvent { id: ID2, timestamp: _, config: STAMP }))
        );

        Ok(())
    }
}