// File: display_utils/controller.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use anyhow::Context;
6use display_types::IMAGE_TILING_TYPE_LINEAR;
7
8use fidl::endpoints::ClientEnd;
9use fidl_fuchsia_hardware_display::{
10    self as display, CoordinatorListenerRequest, LayerId as FidlLayerId,
11};
12use fidl_fuchsia_hardware_display_types::{self as display_types};
13use fuchsia_async::{DurationExt as _, TimeoutExt as _};
14use fuchsia_component::client::Service;
15use fuchsia_sync::RwLock;
16use futures::channel::mpsc;
17use futures::{TryFutureExt, TryStreamExt, future};
18use std::fmt;
19use std::sync::Arc;
20use zx::{self as zx, HandleBased};
21
22use crate::INVALID_EVENT_ID;
23use crate::config::{DisplayConfig, LayerConfig};
24use crate::error::{ConfigError, Error, Result};
25use crate::types::{
26    BufferCollectionId, ClientPriority, DisplayId, DisplayInfo, Event, EventId, ImageId, LayerId,
27};
28
// How long `init()` waits for a display-coordinator device to appear, and how long
// initialization waits for the initial `OnDisplaysChanged` event, before failing.
const TIMEOUT: zx::MonotonicDuration = zx::MonotonicDuration::from_seconds(2);
30
/// Client abstraction for the `fuchsia.hardware.display.Coordinator` protocol. Instances can be
/// safely cloned and passed across threads.
#[derive(Clone)]
pub struct Coordinator {
    // Shared, lock-guarded state; cloning a `Coordinator` only bumps the `Arc` refcount, so all
    // clones observe the same displays, listeners, and counters.
    inner: Arc<RwLock<CoordinatorInner>>,
}
37
// State shared by every clone of a `Coordinator`, guarded by the `RwLock` in `Coordinator::inner`.
struct CoordinatorInner {
    // Displays reported by the initial `OnDisplaysChanged` event. (Subsequent change events are
    // not yet folded in — see `handle_displays_changed`.)
    displays: Vec<DisplayInfo>,

    // FIDL proxy over which all Coordinator requests are sent.
    proxy: display::CoordinatorProxy,

    // CoordinatorListener request stream; taken (exactly once) by `Coordinator::handle_events`.
    // `None` after it has been claimed.
    listener_requests: Option<display::CoordinatorListenerRequestStream>,

    // All subscribed vsync listeners and their optional ID filters.
    vsync_listeners: Vec<(mpsc::UnboundedSender<VsyncEvent>, Option<DisplayId>)>,

    // Simple counter to generate client-assigned integer identifiers (buffer collections,
    // events, and layers all draw from this one counter).
    id_counter: u64,

    // Generate stamps for `commit_config()`.
    stamp_counter: u64,
}
52
/// A vsync event payload, delivered to channels registered via
/// `Coordinator::add_vsync_listener`.
#[derive(Debug)]
pub struct VsyncEvent {
    /// The ID of the display that generated the vsync event.
    pub id: DisplayId,

    /// The monotonic timestamp of the vsync event.
    pub timestamp: zx::MonotonicInstant,

    /// The stamp of the latest fully applied display configuration.
    pub config: display::ConfigStamp,
}
65
impl Coordinator {
    /// Establishes a connection to the display-coordinator device and initialize a `Coordinator`
    /// instance with the initial set of available displays. The returned `Coordinator` will
    /// maintain FIDL connection to the underlying device as long as it is alive or the connection
    /// is closed by the peer.
    ///
    /// Returns an error if
    /// - No display-coordinator device is found within `TIMEOUT`.
    /// - An initial OnDisplaysChanged event is not received from the display driver within
    ///   `TIMEOUT` seconds.
    ///
    /// Current limitations:
    ///   - This function connects to the first display-coordinator device that it observes. It
    ///   currently does not support selection of a specific device if multiple display-coordinator
    ///   devices are present.
    // TODO(https://fxbug.dev/42168593): This will currently result in an error if no displays are present on
    // the system (or if one is not attached within `TIMEOUT`). It wouldn't be necessary to rely on
    // a timeout if the display driver sent an event with no displays.
    pub async fn init(client_priority: ClientPriority) -> Result<Coordinator> {
        // Wait (bounded by `TIMEOUT`) for any display-coordinator service instance to appear.
        let service_proxy = Service::open(display::ServiceMarker)
            .context("failed to open display Service")
            .map_err(Error::DeviceConnectionError)?
            .watch_for_any()
            .map_err(Error::DeviceConnectionError)
            .on_timeout(TIMEOUT.after_now(), || Err(Error::DeviceNotFound))
            .await?;

        let provider_proxy = service_proxy
            .connect_to_provider()
            .context("failed to connect to FIDL provider")
            .map_err(|x| Error::DeviceConnectionError(x.into()))?;

        // Create both channel pairs locally; the server/client ends are handed to the provider
        // below, while the proxy and request stream stay with this `Coordinator`.
        let (coordinator_proxy, coordinator_server_end) =
            fidl::endpoints::create_proxy::<display::CoordinatorMarker>();
        let (coordinator_listener_client_end, coordinator_listener_requests) =
            fidl::endpoints::create_request_stream::<display::CoordinatorListenerMarker>();

        // TODO(https://fxbug.dev/42075865): Consider supporting virtcon client
        // connections.
        let payload = display::ProviderOpenCoordinatorRequest {
            coordinator: Some(coordinator_server_end),
            coordinator_listener: Some(coordinator_listener_client_end),
            priority: Some(client_priority.into()),
            __source_breaking: fidl::marker::SourceBreaking,
        };
        let () = provider_proxy.open_coordinator(payload).await?.map_err(zx::Status::from_raw)?;

        Self::init_with_proxy_and_listener_requests(
            coordinator_proxy,
            coordinator_listener_requests,
        )
        .await
    }

    /// Initialize a `Coordinator` instance from pre-established Coordinator and
    /// CoordinatorListener channels.
    ///
    /// Returns an error if
    /// - An initial OnDisplaysChanged event is not received from the display driver within
    ///   `TIMEOUT` seconds.
    // TODO(https://fxbug.dev/42168593): This will currently result in an error if no displays are
    // present on the system (or if one is not attached within `TIMEOUT`). It wouldn't be necessary
    // to rely on a timeout if the display driver sent an event with no displays.
    pub async fn init_with_proxy_and_listener_requests(
        coordinator_proxy: display::CoordinatorProxy,
        mut listener_requests: display::CoordinatorListenerRequestStream,
    ) -> Result<Coordinator> {
        // Block (bounded by `TIMEOUT`) until the driver reports its initial display list.
        let displays = wait_for_initial_displays(&mut listener_requests)
            .on_timeout(TIMEOUT.after_now(), || Err(Error::NoDisplays))
            .await?
            .into_iter()
            .map(DisplayInfo)
            .collect::<Vec<_>>();
        Ok(Coordinator {
            inner: Arc::new(RwLock::new(CoordinatorInner {
                proxy: coordinator_proxy,
                listener_requests: Some(listener_requests),
                displays,
                vsync_listeners: Vec::new(),
                id_counter: 0,
                stamp_counter: 0,
            })),
        })
    }

    /// Returns a copy of the list of displays that are currently known to be present on the system.
    pub fn displays(&self) -> Vec<DisplayInfo> {
        self.inner.read().displays.clone()
    }

    /// Returns a clone of the underlying FIDL client proxy.
    ///
    /// Note: This can be helpful to prevent holding the inner RwLock when awaiting a chained FIDL
    /// call over a proxy.
    pub fn proxy(&self) -> display::CoordinatorProxy {
        self.inner.read().proxy.clone()
    }

    /// Registers a channel to listen to vsync events.
    ///
    /// When `id` is `Some`, only events from that display are delivered; `None` receives events
    /// from every display. Events flow only while `handle_events()` is being polled.
    pub fn add_vsync_listener(
        &self,
        id: Option<DisplayId>,
    ) -> Result<mpsc::UnboundedReceiver<VsyncEvent>> {
        // TODO(armansito): Switch to a bounded channel instead.
        let (sender, receiver) = mpsc::unbounded::<VsyncEvent>();
        self.inner.write().vsync_listeners.push((sender, id));
        Ok(receiver)
    }

    /// Returns a Future that represents the FIDL event handling task. Once scheduled on an
    /// executor, this task will continuously handle incoming FIDL events from the display stack
    /// and the returned Future will not terminate until the FIDL channel is closed.
    ///
    /// This task can be scheduled safely on any thread.
    ///
    /// Returns `Error::AlreadyRequested` if the event stream has already been claimed by a
    /// previous call — only one event-handling task may exist per `Coordinator`.
    pub async fn handle_events(&self) -> Result<()> {
        let inner = self.inner.clone();
        // Take exclusive ownership of the request stream; it is not returned on completion.
        let mut events = inner.write().listener_requests.take().ok_or(Error::AlreadyRequested)?;
        while let Some(msg) = events.try_next().await? {
            match msg {
                CoordinatorListenerRequest::OnDisplaysChanged {
                    added,
                    removed,
                    control_handle: _,
                } => {
                    let removed =
                        removed.into_iter().map(|id| id.into()).collect::<Vec<DisplayId>>();
                    inner.read().handle_displays_changed(added, removed);
                }
                CoordinatorListenerRequest::OnVsync {
                    display_id,
                    timestamp,
                    displayed_config_stamp,
                    cookie,
                    control_handle: _,
                } => {
                    inner.write().handle_vsync(
                        display_id.into(),
                        timestamp,
                        displayed_config_stamp,
                        cookie,
                    )?;
                }
                // Other listener requests are intentionally ignored.
                _ => continue,
            }
        }
        Ok(())
    }

    /// Allocates a new virtual hardware layer that is not associated with any display and has no
    /// configuration.
    pub async fn create_layer(&self) -> Result<LayerId> {
        let layer_id = self.inner.write().next_free_layer_id()?;
        self.proxy().create_layer(&layer_id.into()).await?.map_err(zx::Status::from_raw)?;
        Ok(layer_id)
    }

    /// Creates and registers a zircon event with the display driver. The returned event can be
    /// used as a fence in a display configuration.
    pub fn create_event(&self) -> Result<Event> {
        let event = zx::Event::create();
        // The driver gets a duplicate handle; the original stays with the returned `Event`.
        let remote = event.duplicate_handle(zx::Rights::SAME_RIGHTS)?;
        let id = self.inner.write().next_free_event_id()?;

        self.inner.read().proxy.import_event(zx::Event::from(remote), &id.into())?;
        Ok(Event::new(id, event))
    }

    /// Apply a display configuration. The client is expected to receive a vsync event once the
    /// configuration is successfully applied. Returns an error if the FIDL message cannot be sent.
    ///
    /// On success, returns the client-generated config stamp that will be echoed back in vsync
    /// events once the configuration takes effect.
    pub async fn commit_config(
        &self,
        configs: &[DisplayConfig],
    ) -> std::result::Result<u64, ConfigError> {
        let proxy = self.proxy();
        // Stage the layer assignment and per-layer configuration for each display.
        for config in configs {
            proxy.set_display_layers(
                &config.id.into(),
                &config.layers.iter().map(|l| l.id.into()).collect::<Vec<FidlLayerId>>(),
            )?;
            for layer in &config.layers {
                match &layer.config {
                    LayerConfig::Color { color, display_destination } => {
                        let fidl_color = fidl_fuchsia_hardware_display_types::Color::from(color);
                        proxy.set_layer_color_config(
                            &layer.id.into(),
                            &fidl_color,
                            display_destination,
                        )?;
                    }
                    LayerConfig::Primary { image_id, image_metadata, unblock_event, alpha } => {
                        proxy.set_layer_primary_config(&layer.id.into(), &image_metadata)?;
                        if let Some(alpha_config) = alpha {
                            proxy.set_layer_primary_alpha(
                                &layer.id.into(),
                                alpha_config.mode,
                                alpha_config.val,
                            )?;
                        }
                        proxy.set_layer_image2(
                            &layer.id.into(),
                            &(*image_id).into(),
                            // An absent unblock event means the image is not fenced.
                            &unblock_event.unwrap_or(INVALID_EVENT_ID).into(),
                        )?;
                    }
                }
            }
        }

        // Validate the staged configuration before committing it.
        let result = proxy.check_config().await?;
        if result != display_types::ConfigResult::Ok {
            return Err(ConfigError::invalid(result));
        }

        // NOTE(review): panics if the u64 stamp counter is exhausted — effectively unreachable
        // in practice, but not surfaced as a `ConfigError`.
        let config_stamp = self.inner.write().next_config_stamp().unwrap();
        let payload = fidl_fuchsia_hardware_display::CoordinatorCommitConfigRequest {
            stamp: Some(fidl_fuchsia_hardware_display::ConfigStamp { value: config_stamp }),
            ..Default::default()
        };
        match proxy.commit_config(payload) {
            Ok(()) => Ok(config_stamp),
            Err(err) => Err(ConfigError::from(err)),
        }
    }

    /// Get the config stamp value of the most recent applied config in
    /// `commit_config`. Returns an error if the FIDL message cannot be sent.
    pub async fn get_recent_committed_config_stamp(&self) -> std::result::Result<u64, Error> {
        let proxy = self.proxy();
        let response = proxy.get_latest_committed_config_stamp().await?;
        Ok(response.value)
    }

    /// Import a sysmem buffer collection. The returned `BufferCollectionId` can be used in future
    /// API calls to refer to the imported collection.
    pub(crate) async fn import_buffer_collection(
        &self,
        token: ClientEnd<fidl_fuchsia_sysmem2::BufferCollectionTokenMarker>,
    ) -> Result<BufferCollectionId> {
        let id = self.inner.write().next_free_collection_id()?;
        let proxy = self.proxy();

        // First import the token.
        proxy.import_buffer_collection(&id.into(), token).await?.map_err(zx::Status::from_raw)?;

        // Tell the driver to assign any device-specific constraints.
        // TODO(https://fxbug.dev/42166207): These fields are effectively unused except for `type` in the case
        // of IMAGE_TYPE_CAPTURE.
        proxy
            .set_buffer_collection_constraints(
                &id.into(),
                &display_types::ImageBufferUsage { tiling_type: IMAGE_TILING_TYPE_LINEAR },
            )
            .await?
            .map_err(zx::Status::from_raw)?;
        Ok(id)
    }

    /// Notify the display driver to release its handle on a previously imported buffer collection.
    pub(crate) fn release_buffer_collection(&self, id: BufferCollectionId) -> Result<()> {
        self.inner.read().proxy.release_buffer_collection(&id.into()).map_err(Error::from)
    }

    /// Register a sysmem buffer collection backed image to the display driver.
    pub(crate) async fn import_image(
        &self,
        collection_id: BufferCollectionId,
        image_id: ImageId,
        image_metadata: display_types::ImageMetadata,
    ) -> Result<()> {
        self.proxy()
            .import_image(
                &image_metadata,
                &collection_id.into(),
                0, // buffer_index
                &image_id.into(),
            )
            .await?
            .map_err(zx::Status::from_raw)?;
        Ok(())
    }
}
347
348// fmt::Debug implementation to allow a `Coordinator` instance to be used with a debug format
349// specifier. We use a custom implementation as not all `Coordinator` members derive fmt::Debug.
350impl fmt::Debug for Coordinator {
351    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
352        f.debug_struct("Coordinator").field("displays", &self.displays()).finish()
353    }
354}
355
356impl CoordinatorInner {
357    fn next_free_collection_id(&mut self) -> Result<BufferCollectionId> {
358        self.id_counter = self.id_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
359        Ok(BufferCollectionId(self.id_counter))
360    }
361
362    fn next_free_event_id(&mut self) -> Result<EventId> {
363        self.id_counter = self.id_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
364        Ok(EventId(self.id_counter))
365    }
366
367    fn next_free_layer_id(&mut self) -> Result<LayerId> {
368        self.id_counter = self.id_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
369        Ok(LayerId(self.id_counter))
370    }
371
372    fn next_config_stamp(&mut self) -> Result<u64> {
373        self.stamp_counter = self.stamp_counter.checked_add(1).ok_or(Error::IdsExhausted)?;
374        Ok(self.stamp_counter)
375    }
376
377    fn handle_displays_changed(&self, _added: Vec<display::Info>, _removed: Vec<DisplayId>) {
378        // TODO(armansito): update the displays list and notify clients. Terminate vsync listeners
379        // that are attached to a removed display.
380    }
381
382    fn handle_vsync(
383        &mut self,
384        display_id: DisplayId,
385        timestamp: zx::MonotonicInstant,
386        displayed_config_stamp: display::ConfigStamp,
387        cookie: display::VsyncAckCookie,
388    ) -> Result<()> {
389        if cookie.value != 0 {
390            self.proxy.acknowledge_vsync(cookie.value)?;
391        }
392
393        let mut listeners_to_remove = Vec::new();
394        for (pos, (sender, filter)) in self.vsync_listeners.iter().enumerate() {
395            // Skip the listener if it has a filter that does not match `display_id`.
396            if filter.as_ref().map_or(false, |id| *id != display_id) {
397                continue;
398            }
399            let payload = VsyncEvent { id: display_id, timestamp, config: displayed_config_stamp };
400            if let Err(e) = sender.unbounded_send(payload) {
401                if e.is_disconnected() {
402                    listeners_to_remove.push(pos);
403                } else {
404                    return Err(e.into());
405                }
406            }
407        }
408
409        // Clean up disconnected listeners.
410        listeners_to_remove.into_iter().for_each(|pos| {
411            self.vsync_listeners.swap_remove(pos);
412        });
413
414        Ok(())
415    }
416}
417
418// Waits for a single fuchsia.hardware.display.Coordinator.OnDisplaysChanged event and returns the
419// reported displays. By API contract, this event will fire at least once upon initial channel
420// connection if any displays are present. If no displays are present, then the returned Future
421// will not resolve until a display is plugged in.
422async fn wait_for_initial_displays(
423    listener_requests: &mut display::CoordinatorListenerRequestStream,
424) -> Result<Vec<display::Info>> {
425    let mut stream = listener_requests.try_filter_map(|event| match event {
426        CoordinatorListenerRequest::OnDisplaysChanged { added, removed: _, control_handle: _ } => {
427            future::ok(Some(added))
428        }
429        _ => future::ok(None),
430    });
431    stream.try_next().await?.ok_or(Error::NoDisplays)
432}
433
#[cfg(test)]
mod tests {
    use super::{Coordinator, DisplayId, VsyncEvent};
    use anyhow::{Context, Result, format_err};
    use assert_matches::assert_matches;
    use display_mocks::{MockCoordinator, create_proxy_and_mock};
    use fuchsia_async::TestExecutor;
    use futures::task::Poll;
    use futures::{FutureExt, StreamExt, pin_mut, select};
    use {
        fidl_fuchsia_hardware_display as display,
        fidl_fuchsia_hardware_display_types as display_types,
    };

    // Thin wrapper that adds error context around `Coordinator` initialization.
    async fn init_with_proxy_and_listener_requests(
        coordinator_proxy: display::CoordinatorProxy,
        listener_requests: display::CoordinatorListenerRequestStream,
    ) -> Result<Coordinator> {
        Coordinator::init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests)
            .await
            .context("failed to initialize Coordinator")
    }

    // Returns a Coordinator and a connected mock FIDL server. This function sets up the initial
    // "OnDisplaysChanged" event with the given list of `displays`, which `Coordinator` requires
    // before it can resolve its initialization Future.
    async fn init_with_displays(
        displays: &[display::Info],
    ) -> Result<(Coordinator, MockCoordinator)> {
        let (coordinator_proxy, listener_requests, mut mock) = create_proxy_and_mock()?;
        mock.assign_displays(displays.to_vec())?;

        Ok((
            init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests).await?,
            mock,
        ))
    }

    // Without a display-coordinator service available, `init` must fail rather than hang.
    #[fuchsia::test]
    async fn test_init_fails_with_no_device_dir() {
        let result = Coordinator::init(crate::types::ClientPriority(300)).await;
        assert_matches!(result, Err(_));
    }

    // An empty (but delivered) initial display list still resolves initialization.
    #[fuchsia::test]
    async fn test_init_with_no_displays() -> Result<()> {
        let (coordinator_proxy, listener_requests, mut mock) = create_proxy_and_mock()?;
        mock.assign_displays([].to_vec())?;

        let coordinator =
            init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests).await?;
        assert!(coordinator.displays().is_empty());

        Ok(())
    }

    // TODO(https://fxbug.dev/42075852): We should have an automated test verifying that
    // the service provided by driver framework can be opened correctly.

    // The initial display list reported by the mock must be reflected, in order, by `displays()`.
    #[fuchsia::test]
    async fn test_init_with_displays() -> Result<()> {
        let displays = [
            display::Info {
                id: display_types::DisplayId { value: 1 },
                modes: Vec::new(),
                pixel_format: Vec::new(),
                manufacturer_name: "Foo".to_string(),
                monitor_name: "what".to_string(),
                monitor_serial: "".to_string(),
                horizontal_size_mm: 0,
                vertical_size_mm: 0,
                using_fallback_size: false,
                max_layer_count: 1,
            },
            display::Info {
                id: display_types::DisplayId { value: 2 },
                modes: Vec::new(),
                pixel_format: Vec::new(),
                manufacturer_name: "Bar".to_string(),
                monitor_name: "who".to_string(),
                monitor_serial: "".to_string(),
                horizontal_size_mm: 0,
                vertical_size_mm: 0,
                using_fallback_size: false,
                max_layer_count: 1,
            },
        ]
        .to_vec();
        let (coordinator_proxy, listener_requests, mut mock) = create_proxy_and_mock()?;
        mock.assign_displays(displays.clone())?;

        let coordinator =
            init_with_proxy_and_listener_requests(coordinator_proxy, listener_requests).await?;
        assert_eq!(coordinator.displays().len(), 2);
        assert_eq!(coordinator.displays()[0].0, displays[0]);
        assert_eq!(coordinator.displays()[1].0, displays[1]);

        Ok(())
    }

    // A single unfiltered listener receives a vsync event emitted by the mock server.
    #[test]
    fn test_vsync_listener_single() -> Result<()> {
        // Drive an executor directly for this test to avoid having to rely on timeouts for cases
        // in which no events are received.
        let mut executor = TestExecutor::new();
        let (coordinator, mock) = executor.run_singlethreaded(init_with_displays(&[]))?;
        let mut vsync = coordinator.add_vsync_listener(None)?;

        const ID: DisplayId = DisplayId(1);
        const STAMP: display::ConfigStamp = display::ConfigStamp { value: 1 };
        // Race the listener against the FIDL event-handler task; the handler running to
        // completion first would indicate a dropped channel, which is a failure.
        let event_handlers = async {
            select! {
                event = vsync.next() => event.ok_or_else(|| format_err!("did not receive vsync event")),
                result = coordinator.handle_events().fuse() => {
                    result.context("FIDL event handler failed")?;
                    Err(format_err!("FIDL event handler completed before client vsync event"))
                },
            }
        };
        pin_mut!(event_handlers);

        // Send a single event.
        mock.emit_vsync_event(ID.0, STAMP)?;
        let vsync_event = executor.run_until_stalled(&mut event_handlers);
        assert_matches!(
            vsync_event,
            Poll::Ready(Ok(VsyncEvent { id: ID, timestamp: _, config: STAMP }))
        );

        Ok(())
    }

    // Multiple queued vsync events are delivered to a listener in FIFO order.
    #[test]
    fn test_vsync_listener_multiple() -> Result<()> {
        // Drive an executor directly for this test to avoid having to rely on timeouts for cases
        // in which no events are received.
        let mut executor = TestExecutor::new();
        let (coordinator, mock) = executor.run_singlethreaded(init_with_displays(&[]))?;
        let mut vsync = coordinator.add_vsync_listener(None)?;

        let fidl_server = coordinator.handle_events().fuse();
        pin_mut!(fidl_server);

        const ID1: DisplayId = DisplayId(1);
        const ID2: DisplayId = DisplayId(2);
        const STAMP: display::ConfigStamp = display::ConfigStamp { value: 1 };

        // Queue multiple events.
        mock.emit_vsync_event(ID1.0, STAMP)?;
        mock.emit_vsync_event(ID2.0, STAMP)?;
        mock.emit_vsync_event(ID1.0, STAMP)?;

        // Process the FIDL events. The FIDL server Future should not complete as it runs
        // indefinitely.
        let fidl_server_result = executor.run_until_stalled(&mut fidl_server);
        assert_matches!(fidl_server_result, Poll::Pending);

        // Process the vsync listener.
        let vsync_event = executor.run_until_stalled(&mut Box::pin(async { vsync.next().await }));
        assert_matches!(
            vsync_event,
            Poll::Ready(Some(VsyncEvent { id: ID1, timestamp: _, config: STAMP }))
        );

        let vsync_event = executor.run_until_stalled(&mut Box::pin(async { vsync.next().await }));
        assert_matches!(
            vsync_event,
            Poll::Ready(Some(VsyncEvent { id: ID2, timestamp: _, config: STAMP }))
        );

        let vsync_event = executor.run_until_stalled(&mut Box::pin(async { vsync.next().await }));
        assert_matches!(
            vsync_event,
            Poll::Ready(Some(VsyncEvent { id: ID1, timestamp: _, config: STAMP }))
        );

        Ok(())
    }

    // A listener registered with a display-ID filter only sees events from that display.
    #[test]
    fn test_vsync_listener_display_id_filter() -> Result<()> {
        // Drive an executor directly for this test to avoid having to rely on timeouts for cases
        // in which no events are received.
        let mut executor = TestExecutor::new();
        let (coordinator, mock) = executor.run_singlethreaded(init_with_displays(&[]))?;

        const ID1: DisplayId = DisplayId(1);
        const ID2: DisplayId = DisplayId(2);
        const STAMP: display::ConfigStamp = display::ConfigStamp { value: 1 };

        // Listen to events from ID2.
        let mut vsync = coordinator.add_vsync_listener(Some(ID2))?;
        let event_handlers = async {
            select! {
                event = vsync.next() => event.ok_or_else(|| format_err!("did not receive vsync event")),
                result = coordinator.handle_events().fuse() => {
                    result.context("FIDL event handler failed")?;
                    Err(format_err!("FIDL event handler completed before client vsync event"))
                },
            }
        };
        pin_mut!(event_handlers);

        // Event from ID1 should get filtered out and the client should not receive any events.
        mock.emit_vsync_event(ID1.0, STAMP)?;
        let vsync_event = executor.run_until_stalled(&mut event_handlers);
        assert_matches!(vsync_event, Poll::Pending);

        // Event from ID2 should be received.
        mock.emit_vsync_event(ID2.0, STAMP)?;
        let vsync_event = executor.run_until_stalled(&mut event_handlers);
        assert_matches!(
            vsync_event,
            Poll::Ready(Ok(VsyncEvent { id: ID2, timestamp: _, config: STAMP }))
        );

        Ok(())
    }
}