elf_runner/lib.rs

// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

mod component;
mod component_set;
mod config;
mod crash_handler;
pub mod crash_info;
mod error;
mod logger;
mod memory;
pub mod process_launcher;
mod runtime_dir;
mod stdout;
pub mod vdso_vmo;

use self::component::{ElfComponent, ElfComponentInfo};
use self::config::ElfProgramConfig;
use self::error::{JobError, StartComponentError, StartInfoError};
use self::runtime_dir::RuntimeDirBuilder;
use self::stdout::bind_streams_to_syslog;
use crate::component_set::ComponentSet;
use crate::config::ElfProgramBadHandlesPolicy;
use crate::crash_info::CrashRecords;
use crate::memory::reporter::MemoryReporter;
use crate::vdso_vmo::get_next_vdso_vmo;
use ::routing::policy::ScopedPolicyChecker;
use chrono::DateTime;
use fidl::endpoints::ServerEnd;
use fidl_fuchsia_component_runner::{
    ComponentDiagnostics, ComponentTasks, Task as DiagnosticsTask,
};
use fidl_fuchsia_process_lifecycle::LifecycleMarker;
use fuchsia_async::{self as fasync, TimeoutExt};
use fuchsia_runtime::{HandleInfo, HandleType, UtcClock, duplicate_utc_clock_handle, job_default};
use futures::TryStreamExt;
use futures::channel::oneshot;
use log::{trace, warn};
use moniker::Moniker;
use runner::StartInfo;
use runner::component::StopInfo;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use vfs::execution_scope::ExecutionScope;
use zx::{self as zx, AsHandleRef, HandleBased};
use {
    fidl_fuchsia_component as fcomp, fidl_fuchsia_component_runner as fcrunner,
    fidl_fuchsia_io as fio, fidl_fuchsia_memory_attribution as fattribution,
    fidl_fuchsia_process as fproc,
};
// Maximum time that the runner will wait for the break_on_start eventpair to signal.
// This is set to prevent debuggers from blocking us for too long, either intentionally
// or unintentionally.
const MAX_WAIT_BREAK_ON_START: zx::MonotonicDuration = zx::MonotonicDuration::from_millis(300);

// Minimum timer slack amount and default mode. The amount should be large enough to allow for some
// coalescing of timers, but small enough to ensure applications don't miss deadlines.
//
// TODO(https://fxbug.dev/42120293): For now, set the value to 50us to avoid delaying performance-critical
// timers in Scenic and other system services.
const TIMER_SLACK_DURATION: zx::MonotonicDuration = zx::MonotonicDuration::from_micros(50);

// Rights used when duplicating the UTC clock handle.
//
// Formed out of:
// * `zx::Rights::BASIC`, but
// * with `zx::Rights::WRITE` stripped (UTC clock is normally read-only), and
// * with `zx::Rights::INSPECT` added (so that `ZX_INFO_CLOCK_MAPPED_SIZE` can be queried).
//
// Rather than subtracting `WRITE` from `BASIC`, we build the rights explicitly to avoid
// including unintended rights by accident.
//
// The bitwise `|` operator for `bitflags` is implemented through the `std::ops::BitOr` trait,
// which cannot be used in a const context. The workaround is to bitwise OR the raw bits.
const DUPLICATE_CLOCK_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
    zx::Rights::READ.bits() // BASIC
        | zx::Rights::WAIT.bits() // BASIC
        | zx::Rights::DUPLICATE.bits() // BASIC
        | zx::Rights::TRANSFER.bits() // BASIC
        | zx::Rights::INSPECT.bits()
        // Allows calls to zx_clock_read_mappable and zx_clock_get_details_mappable.
        // Since "regular" read and details only require READ, and mappable read
        // and details read the clock the same way, it seems safe to include MAP
        // in this set of rights.
        | zx::Rights::MAP.bits(),
);

// Mapping of component monikers to "acceptable" exit codes.
//
// There are some ELF programs that, upon exit, produce certain exit codes that
// are a "normal" part of operation. The most interesting of these from Fuchsia's
// perspective is the sshd binary, which returns a 255 exit code when the client
// hangs up unexpectedly (e.g. sending SIGINT to a running ssh process).
//
// Due to how `ffx` interacts with the Target over ssh in a user-interactive mode,
// it is commonplace for the user to SIGINT their locally running ffx processes,
// which SIGINTs the SSH process running on the Host and causes misleading
// logs on the Target, implying that sshd has had an error (when in fact there is none).
//
// If this list grows significantly (not expected), we may consider adding
// this as a formal configuration option somewhere. That said, this is (currently)
// only used for suppressing diagnostic logs, so this is unlikely.
static MONIKER_PREFIXES_TO_ACCEPTABLE_EXIT_CODES: std::sync::LazyLock<
    HashMap<&'static str, Vec<i64>>,
> = std::sync::LazyLock::new(|| {
    let mut m = HashMap::new();
    m.insert("core/sshd-host/shell:sshd-", vec![255]);
    m
});

/// Runs components with ELF binaries.
pub struct ElfRunner {
    /// Each ELF component run by this runner will live inside a job that is a
    /// child of this job.
    job: zx::Job,

    launcher_connector: process_launcher::Connector,

    /// If `utc_clock` is populated then that Clock's handle will
    /// be passed into the newly created process. Otherwise, the UTC
    /// clock assigned to the current process will be duplicated.
    /// The latter is typically the case in unit tests and nested
    /// component managers.
    utc_clock: Option<Arc<UtcClock>>,

    crash_records: CrashRecords,

    /// Tracks the ELF components that are currently running under this runner.
    components: Arc<ComponentSet>,

    /// Tracks reporting memory changes to an observer.
    memory_reporter: MemoryReporter,

    /// Tasks that support the runner are launched in this scope.
    scope: ExecutionScope,
}

/// The job for a component.
pub enum Job {
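    /// The component's process lives directly in the job created for the
    /// component; the runner takes that job's exception channel for crash
    /// recording.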
    Single(zx::Job),
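    /// Used when the program requests `job_with_available_exception_channel`:
    /// the process lives in `child`, a job nested under `parent`, so the
    /// runner can take `parent`'s exception channel for crash recording while
    /// leaving `child`'s exception channel available for taking.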
    Multiple { parent: zx::Job, child: zx::Job },
}

impl Job {
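    /// Returns the top job created for the component, i.e. the job whose
    /// exception channel the runner uses for crash recording.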
    fn top(&self) -> &zx::Job {
        match self {
            Job::Single(job) => job,
            Job::Multiple { parent, child: _ } => parent,
        }
    }

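    /// Returns the job that directly holds the component's process.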
    fn proc(&self) -> &zx::Job {
        match self {
            Job::Single(job) => job,
            Job::Multiple { parent: _, child } => child,
        }
    }
}

impl ElfRunner {
    pub fn new(
        job: zx::Job,
        launcher_connector: process_launcher::Connector,
        utc_clock: Option<Arc<UtcClock>>,
        crash_records: CrashRecords,
    ) -> ElfRunner {
        let scope = ExecutionScope::new();
        let components = ComponentSet::new(scope.clone());
        let memory_reporter = MemoryReporter::new(components.clone());
        ElfRunner {
            job,
            launcher_connector,
            utc_clock,
            crash_records,
            components,
            memory_reporter,
            scope,
        }
    }

    /// Returns a UTC clock handle.
    ///
    /// Duplicates `self.utc_clock` if populated, or the UTC clock assigned to the current process.
    async fn duplicate_utc_clock(&self) -> Result<UtcClock, zx::Status> {
        if let Some(utc_clock) = &self.utc_clock {
            utc_clock.duplicate_handle(DUPLICATE_CLOCK_RIGHTS)
        } else {
            duplicate_utc_clock_handle(DUPLICATE_CLOCK_RIGHTS)
        }
    }

    /// Creates a job for a component.
    fn create_job(&self, program_config: &ElfProgramConfig) -> Result<Job, JobError> {
        let job = self.job.create_child_job().map_err(JobError::CreateChild)?;

        // Set timer slack.
        //
        // Why Late and not Center or Early? Timers firing a little later than requested is not
        // uncommon in non-realtime systems. Programs are generally tolerant of some
        // delays. However, timers firing before their deadline can be unexpected and lead to bugs.
        job.set_policy(zx::JobPolicy::TimerSlack(
            TIMER_SLACK_DURATION,
            zx::JobDefaultTimerMode::Late,
        ))
        .map_err(JobError::SetPolicy)?;

        // Prevent direct creation of processes.
        //
        // The kernel-level mechanisms for creating processes are very low-level. We require that
        // all processes be created via fuchsia.process.Launcher in order for the platform to
        // maintain change-control over how processes are created.
        if !program_config.create_raw_processes {
            job.set_policy(zx::JobPolicy::Basic(
                zx::JobPolicyOption::Absolute,
                vec![(zx::JobCondition::NewProcess, zx::JobAction::Deny)],
            ))
            .map_err(JobError::SetPolicy)?;
        }

        // Default deny the job policy which allows ambiently marking VMOs executable, i.e. calling
        // vmo_replace_as_executable without an appropriate resource handle.
        if !program_config.ambient_mark_vmo_exec {
            job.set_policy(zx::JobPolicy::Basic(
                zx::JobPolicyOption::Absolute,
                vec![(zx::JobCondition::AmbientMarkVmoExec, zx::JobAction::Deny)],
            ))
            .map_err(JobError::SetPolicy)?;
        }

        if let Some(job_policy_bad_handles) = &program_config.job_policy_bad_handles {
            let action = match job_policy_bad_handles {
                ElfProgramBadHandlesPolicy::DenyException => zx::JobAction::DenyException,
                ElfProgramBadHandlesPolicy::AllowException => zx::JobAction::AllowException,
            };
            job.set_policy(zx::JobPolicy::Basic(
                zx::JobPolicyOption::Absolute,
                vec![(zx::JobCondition::BadHandle, action)],
            ))
            .map_err(JobError::SetPolicy)?;
        }

        Ok(if program_config.job_with_available_exception_channel {
            // Create a new job to hold the process because the component wants
            // the process to be a direct child of a job that has its exception
            // channel available for taking. Note that we (the ELF runner) use
            // a job's exception channel for crash recording, so we create a new
            // job underneath the original job to hold the process.
            let child = job.create_child_job().map_err(JobError::CreateChild)?;
            Job::Multiple { parent: job, child }
        } else {
            Job::Single(job)
        })
    }

    fn create_handle_infos(
        outgoing_dir: Option<zx::Channel>,
        lifecycle_server: Option<zx::Channel>,
        utc_clock: UtcClock,
        next_vdso: Option<zx::Vmo>,
        config_vmo: Option<zx::Vmo>,
    ) -> Vec<fproc::HandleInfo> {
        let mut handle_infos = vec![];

        if let Some(outgoing_dir) = outgoing_dir {
            handle_infos.push(fproc::HandleInfo {
                handle: outgoing_dir.into_handle(),
                id: HandleInfo::new(HandleType::DirectoryRequest, 0).as_raw(),
            });
        }

        if let Some(lifecycle_chan) = lifecycle_server {
            handle_infos.push(fproc::HandleInfo {
                handle: lifecycle_chan.into_handle(),
                id: HandleInfo::new(HandleType::Lifecycle, 0).as_raw(),
            })
        };

        handle_infos.push(fproc::HandleInfo {
            handle: utc_clock.into_handle(),
            id: HandleInfo::new(HandleType::ClockUtc, 0).as_raw(),
        });

        if let Some(next_vdso) = next_vdso {
            handle_infos.push(fproc::HandleInfo {
                handle: next_vdso.into_handle(),
                id: HandleInfo::new(HandleType::VdsoVmo, 0).as_raw(),
            });
        }

        if let Some(config_vmo) = config_vmo {
            handle_infos.push(fproc::HandleInfo {
                handle: config_vmo.into_handle(),
                id: HandleInfo::new(HandleType::ComponentConfigVmo, 0).as_raw(),
            });
        }

        handle_infos
    }

    pub async fn start_component(
        &self,
        start_info: fcrunner::ComponentStartInfo,
        checker: &ScopedPolicyChecker,
    ) -> Result<ElfComponent, StartComponentError> {
        let start_info: StartInfo =
            start_info.try_into().map_err(StartInfoError::StartInfoError)?;

        let resolved_url = start_info.resolved_url.clone();

        // This also checks relevant security policy for config that it wraps using the provided
        // PolicyChecker.
        let program_config = ElfProgramConfig::parse_and_check(&start_info.program, &checker)
            .map_err(|err| {
                StartComponentError::StartInfoError(StartInfoError::ProgramError(err))
            })?;

        let main_process_critical = program_config.main_process_critical;
        let res: Result<ElfComponent, StartComponentError> =
            self.start_component_helper(start_info, checker.scope.clone(), program_config).await;
        match res {
            Err(e) if main_process_critical => {
                panic!(
                    "failed to launch component with a critical process ({:?}): {:?}",
                    &resolved_url, e
                )
            }
            x => x,
        }
    }

    async fn start_component_helper(
        &self,
        mut start_info: StartInfo,
        moniker: Moniker,
        program_config: ElfProgramConfig,
    ) -> Result<ElfComponent, StartComponentError> {
        let resolved_url = &start_info.resolved_url;

        // Fail early if there are clock issues.
        let boot_clock = zx::Clock::<zx::MonotonicTimeline, zx::BootTimeline>::create(
            zx::ClockOpts::CONTINUOUS,
            /*backstop=*/ None,
        )
        .map_err(StartComponentError::BootClockCreateFailed)?;

        // Connect to `fuchsia.process.Launcher`.
        let launcher = self
            .launcher_connector
            .connect()
            .map_err(|err| StartComponentError::ProcessLauncherConnectError(err.into()))?;

        // Create a job for this component that will contain its process.
        let job = self.create_job(&program_config)?;

        crash_handler::run_exceptions_server(
            &self.scope,
            job.top(),
            moniker.clone(),
            resolved_url.clone(),
            self.crash_records.clone(),
        )
        .map_err(StartComponentError::ExceptionRegistrationFailed)?;

        // Convert the directories into proxies, so we can find "/pkg" and open "lib" and bin_path
        let ns = namespace::Namespace::try_from(start_info.namespace)
            .map_err(StartComponentError::NamespaceError)?;

        let config_vmo =
            start_info.encoded_config.take().map(runner::get_config_vmo).transpose()?;

        let next_vdso = program_config.use_next_vdso.then(get_next_vdso_vmo).transpose()?;

        let (lifecycle_client, lifecycle_server) = if program_config.notify_lifecycle_stop {
            // Creating a channel is not expected to fail.
            let (client, server) = fidl::endpoints::create_proxy::<LifecycleMarker>();
            (Some(client), Some(server.into_channel()))
        } else {
            (None, None)
        };

        // Take the UTC clock handle out of `start_info.numbered_handles`, if available.
        let utc_handle = start_info
            .numbered_handles
            .iter()
            .position(|handles| handles.id == HandleInfo::new(HandleType::ClockUtc, 0).as_raw())
            .map(|position| start_info.numbered_handles.swap_remove(position).handle);

        let utc_clock = if let Some(handle) = utc_handle {
            zx::Clock::from(handle)
        } else {
            self.duplicate_utc_clock()
                .await
                .map_err(StartComponentError::UtcClockDuplicateFailed)?
        };

        // Duplicate the clock handle again, used later to wait for the clock to start, while
        // the original handle is passed to the process.
        let utc_clock_dup = utc_clock
            .duplicate_handle(zx::Rights::SAME_RIGHTS)
            .map_err(StartComponentError::UtcClockDuplicateFailed)?;

        // Create and serve the runtime dir.
        let runtime_dir_server_end = start_info
            .runtime_dir
            .ok_or(StartComponentError::StartInfoError(StartInfoError::MissingRuntimeDir))?;
        let job_koid =
            job.proc().get_koid().map_err(StartComponentError::JobGetKoidFailed)?.raw_koid();

        let runtime_dir = RuntimeDirBuilder::new(runtime_dir_server_end)
            .args(program_config.args.clone())
            .job_id(job_koid)
            .serve();

        // If the component supports memory attribution, clone its outgoing directory connection
        // so that we may later connect to it.
        let outgoing_directory = if program_config.memory_attribution {
            let Some(outgoing_dir) = start_info.outgoing_dir else {
                return Err(StartComponentError::StartInfoError(
                    StartInfoError::MissingOutgoingDir,
                ));
            };
            let (outgoing_dir_client, outgoing_dir_server) = fidl::endpoints::create_endpoints();
            start_info.outgoing_dir = Some(outgoing_dir_server);
            fdio::open_at(
                outgoing_dir_client.channel(),
                ".",
                fio::Flags::PROTOCOL_DIRECTORY,
                outgoing_dir.into_channel(),
            )
            .unwrap();
            Some(outgoing_dir_client)
        } else {
            None
        };

        // Create procarg handles.
        let mut handle_infos = ElfRunner::create_handle_infos(
            start_info.outgoing_dir.map(|dir| dir.into_channel()),
            lifecycle_server,
            utc_clock,
            next_vdso,
            config_vmo,
        );

        // Add stdout and stderr handles that forward to syslog.
        let (local_scope, stdout_and_stderr_handles) =
            bind_streams_to_syslog(&ns, program_config.stdout_sink, program_config.stderr_sink);
        handle_infos.extend(stdout_and_stderr_handles);

        // Add any external numbered handles.
        handle_infos.extend(start_info.numbered_handles);

        // If the program escrowed a dictionary, give it back via `numbered_handles`.
        if let Some(escrowed_dictionary) = start_info.escrowed_dictionary {
            handle_infos.push(fproc::HandleInfo {
                handle: escrowed_dictionary.token.into_handle().into(),
                id: HandleInfo::new(HandleType::EscrowedDictionary, 0).as_raw(),
            });
        }

        // Configure the process launcher.
        let proc_job_dup = job
            .proc()
            .duplicate_handle(zx::Rights::SAME_RIGHTS)
            .map_err(StartComponentError::JobDuplicateFailed)?;

        let name = Path::new(resolved_url)
            .file_name()
            .and_then(|filename| filename.to_str())
            .ok_or_else(|| {
                StartComponentError::StartInfoError(StartInfoError::BadResolvedUrl(
                    resolved_url.clone(),
                ))
            })?;

        let launch_info =
            runner::component::configure_launcher(runner::component::LauncherConfigArgs {
                bin_path: &program_config.binary,
                name,
                options: program_config.process_options(),
                args: Some(program_config.args.clone()),
                ns,
                job: Some(proc_job_dup),
                handle_infos: Some(handle_infos),
                name_infos: None,
                environs: program_config.environ.clone(),
                launcher: &launcher,
                loader_proxy_chan: None,
                executable_vmo: None,
            })
            .await?;

        // Wait on break_on_start with a timeout and don't fail.
        if let Some(break_on_start) = start_info.break_on_start {
            fasync::OnSignals::new(&break_on_start, zx::Signals::OBJECT_PEER_CLOSED)
                .on_timeout(MAX_WAIT_BREAK_ON_START, || Err(zx::Status::TIMED_OUT))
                .await
                .err()
                .map(|error| warn!(moniker:%, error:%; "Failed to wait break_on_start"));
        }

        // Launch the process.
        let (status, process) = launcher
            .launch(launch_info)
            .await
            .map_err(StartComponentError::ProcessLauncherFidlError)?;
        zx::Status::ok(status).map_err(StartComponentError::CreateProcessFailed)?;
        let process = process.unwrap(); // Process is present iff status is OK.
        if program_config.main_process_critical {
            job_default()
                .set_critical(zx::JobCriticalOptions::RETCODE_NONZERO, &process)
                .map_err(StartComponentError::ProcessMarkCriticalFailed)
                .expect("failed to set process as critical");
        }

        let pid = process.get_koid().map_err(StartComponentError::ProcessGetKoidFailed)?.raw_koid();

        // Add process ID to the runtime dir.
        runtime_dir.add_process_id(pid);

        fuchsia_trace::instant!(
            c"component:start",
            c"elf",
            fuchsia_trace::Scope::Thread,
            "moniker" => format!("{}", moniker).as_str(),
            "url" => resolved_url.as_str(),
            "pid" => pid
        );

        // Add process start time to the runtime dir.
        let process_start_instant_mono =
            process.info().map_err(StartComponentError::ProcessInfoFailed)?.start_time;
        runtime_dir.add_process_start_time(process_start_instant_mono.into_nanos());

        // Add UTC estimate of the process start time to the runtime dir.
        let utc_clock_started = fasync::OnSignals::new(&utc_clock_dup, zx::Signals::CLOCK_STARTED)
            .on_timeout(zx::MonotonicInstant::after(zx::MonotonicDuration::default()), || {
                Err(zx::Status::TIMED_OUT)
            })
            .await
            .is_ok();

        // The clock transformations needed to map a timestamp on a monotonic timeline
        // to a timestamp on the UTC timeline.
        let mono_to_clock_transformation =
            boot_clock.get_details().map(|details| details.reference_to_synthetic).ok();
        let boot_to_utc_transformation = utc_clock_started
            .then(|| utc_clock_dup.get_details().map(|details| details.reference_to_synthetic).ok())
            .flatten();

        if let Some(clock_transformation) = boot_to_utc_transformation {
            // This composes two transformations, to get from a timestamp expressed in
            // nanoseconds on the monotonic timeline, to our best estimate of the
            // corresponding UTC date-time.
            //
            // The clock transformations are computed before they are applied. If
            // a suspend intervenes exactly between the computation and application,
            // the timelines will drift away during sleep, causing a wrong date-time
            // to be exposed in `runtime_dir`.
            //
            // This should not be a huge issue in practice, as the chances of that
            // happening are vanishingly small.
            let maybe_time_utc = mono_to_clock_transformation
                .map(|t| t.apply(process_start_instant_mono))
                .map(|time_boot| clock_transformation.apply(time_boot));

            if let Some(utc_timestamp) = maybe_time_utc {
                let utc_time_ns = utc_timestamp.into_nanos();
                let seconds = (utc_time_ns / 1_000_000_000) as i64;
                let nanos = (utc_time_ns % 1_000_000_000) as u32;
                let dt = DateTime::from_timestamp(seconds, nanos).unwrap();

                // If any of the above values are unavailable (unlikely), then this
                // does not happen.
                runtime_dir.add_process_start_time_utc_estimate(dt.to_string())
            }
        };

        Ok(ElfComponent::new(
            runtime_dir,
            moniker,
            job,
            process,
            lifecycle_client,
            program_config.main_process_critical,
            local_scope,
            resolved_url.clone(),
            outgoing_directory,
            program_config,
            start_info.component_instance.ok_or(StartComponentError::StartInfoError(
                StartInfoError::MissingComponentInstanceToken,
            ))?,
        ))
    }

    pub fn get_scoped_runner(
        self: Arc<Self>,
        checker: ScopedPolicyChecker,
    ) -> Arc<ScopedElfRunner> {
        Arc::new(ScopedElfRunner { runner: self, checker })
    }

    pub fn serve_memory_reporter(&self, stream: fattribution::ProviderRequestStream) {
        self.memory_reporter.serve(stream);
    }
}

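/// An [`ElfRunner`] paired with the [`ScopedPolicyChecker`] for a particular
/// component, as created by [`ElfRunner::get_scoped_runner`].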
pub struct ScopedElfRunner {
    runner: Arc<ElfRunner>,
    checker: ScopedPolicyChecker,
}

impl ScopedElfRunner {
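    /// Serves the `fuchsia.component.runner/ComponentRunner` protocol on the
    /// given request stream, spawning the work onto the runner's scope.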
    pub fn serve(&self, mut stream: fcrunner::ComponentRunnerRequestStream) {
        let runner = self.runner.clone();
        let checker = self.checker.clone();
        self.scope().spawn(async move {
            while let Ok(Some(request)) = stream.try_next().await {
                match request {
                    fcrunner::ComponentRunnerRequest::Start { start_info, controller, .. } => {
                        start(&runner, checker.clone(), start_info, controller).await;
                    }
                    fcrunner::ComponentRunnerRequest::_UnknownMethod { ordinal, .. } => {
                        warn!(ordinal:%; "Unknown ComponentRunner request");
                    }
                }
            }
        });
    }

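    /// Starts the component described by `start_info` and serves the
    /// `ComponentController` protocol on `server_end`.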
    pub async fn start(
        &self,
        start_info: fcrunner::ComponentStartInfo,
        server_end: ServerEnd<fcrunner::ComponentControllerMarker>,
    ) {
        start(&self.runner, self.checker.clone(), start_info, server_end).await
    }

    pub(crate) fn scope(&self) -> &ExecutionScope {
        &self.runner.scope
    }
}

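/// Returns true if `code` is listed as an "acceptable" exit code for a
/// moniker prefix in `MONIKER_PREFIXES_TO_ACCEPTABLE_EXIT_CODES` that matches
/// `moniker`; used to decide whether a non-zero exit is logged as a warning.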
fn is_acceptable_exit_code(moniker: &Moniker, code: i64) -> bool {
    let moniker_name = moniker.to_string();
    MONIKER_PREFIXES_TO_ACCEPTABLE_EXIT_CODES
        .iter()
        .any(|(prefix, codes)| moniker_name.starts_with(*prefix) && codes.contains(&code))
}

/// Starts a component by creating a new Job and Process for the component.
async fn start(
    runner: &ElfRunner,
    checker: ScopedPolicyChecker,
    start_info: fcrunner::ComponentStartInfo,
    server_end: ServerEnd<fcrunner::ComponentControllerMarker>,
) {
    let resolved_url = start_info.resolved_url.clone().unwrap_or_else(|| "<unknown>".to_string());

    let elf_component = match runner.start_component(start_info, &checker).await {
        Ok(elf_component) => elf_component,
        Err(err) => {
            runner::component::report_start_error(
                err.as_zx_status(),
                format!("{}", err),
                &resolved_url,
                server_end,
            );
            return;
        }
    };
    let elf_component_moniker = elf_component.info().get_moniker().clone();

    let (termination_tx, termination_rx) = oneshot::channel::<StopInfo>();
    // This function waits for something from the channel and
    // returns it or Error::Internal if the channel is closed
    let termination_fn = Box::pin(async move {
        termination_rx
            .await
            .unwrap_or_else(|_| {
                warn!("epitaph oneshot channel closed unexpectedly");
                StopInfo::from_error(fcomp::Error::Internal, None)
            })
            .into()
    });

    let Some(proc_copy) = elf_component.copy_process() else {
        runner::component::report_start_error(
            zx::Status::from_raw(
                i32::try_from(fcomp::Error::InstanceCannotStart.into_primitive()).unwrap(),
            ),
            "Component unexpectedly had no process".to_string(),
            &resolved_url,
            server_end,
        );
        return;
    };

    let component_diagnostics = elf_component
        .info()
        .copy_job_for_diagnostics()
        .map(|job| ComponentDiagnostics {
            tasks: Some(ComponentTasks {
                component_task: Some(DiagnosticsTask::Job(job.into())),
                ..Default::default()
            }),
            ..Default::default()
        })
        .map_err(|error| {
            warn!(error:%; "Failed to copy job for diagnostics");
            ()
        })
        .ok();

    let (server_stream, control) = server_end.into_stream_and_control_handle();

    // Spawn a future that watches for the process to exit
    runner.scope.spawn({
        let resolved_url = resolved_url.clone();
        async move {
            fasync::OnSignals::new(&proc_copy.as_handle_ref(), zx::Signals::PROCESS_TERMINATED)
                .await
                .map(|_: fidl::Signals| ()) // Discard.
                .unwrap_or_else(|error| warn!(error:%; "error creating signal handler"));
            // Process exit code '0' is considered a clean return.
            // TODO(https://fxbug.dev/42134825) If we create an epitaph that indicates
            // intentional, non-zero exit, use that for all non-0 exit
            // codes.
            let stop_info = match proc_copy.info() {
                Ok(zx::ProcessInfo { return_code, .. }) => {
                    match return_code {
                        0 => StopInfo::from_ok(Some(return_code)),
                        // Don't log SYSCALL_KILL codes because they are expected in the course
                        // of normal operation. When elf_runner processes a `Kill` method call for
                        // a component, it makes a zx_task_kill syscall, which sets this return code.
                        zx::sys::ZX_TASK_RETCODE_SYSCALL_KILL => StopInfo::from_error(
                            fcomp::Error::InstanceDied.into(),
                            Some(return_code),
                        ),
                        _ => {
                            if is_acceptable_exit_code(&elf_component_moniker, return_code) {
                                trace!(url:% = resolved_url, return_code:%; "component terminated with an acceptable non-zero exit code");
                            } else {
                                warn!(url:% = resolved_url, return_code:%;
                                    "process terminated with abnormal return code");
                            }
                            StopInfo::from_error(fcomp::Error::InstanceDied, Some(return_code))
                        }
                    }
                }
                Err(error) => {
                    warn!(error:%; "Unable to query process info");
                    StopInfo::from_error(fcomp::Error::Internal, None)
                }
            };
            termination_tx.send(stop_info).unwrap_or_else(|_| warn!("error sending done signal"));
        }
    });

    let mut elf_component = elf_component;
    runner.components.clone().add(&mut elf_component);

    // Create a future which owns and serves the controller
    // channel. The `termination_fn` future completes when the
    // component's main process exits. The controller then sets the
    // epitaph on the controller channel, closes it, and stops
    // serving the protocol.
    runner.scope.spawn(async move {
        if let Some(component_diagnostics) = component_diagnostics {
            control.send_on_publish_diagnostics(component_diagnostics).unwrap_or_else(
                |error| warn!(url:% = resolved_url, error:%; "sending diagnostics failed"),
            );
        }
        runner::component::Controller::new(elf_component, server_stream, control)
            .serve(termination_fn)
            .await;
    });
}

#[cfg(test)]
mod tests {
    use super::runtime_dir::RuntimeDirectory;
    use super::*;
    use anyhow::{Context, Error};
    use assert_matches::assert_matches;
    use cm_config::{AllowlistEntryBuilder, JobPolicyAllowlists, SecurityPolicy};
    use fidl::endpoints::{DiscoverableProtocolMarker, Proxy, create_endpoints, create_proxy};
    use fidl_connector::Connect;
    use fidl_fuchsia_component_runner::Task as DiagnosticsTask;
    use fidl_fuchsia_logger::{LogSinkMarker, LogSinkRequestStream};
    use fidl_fuchsia_process_lifecycle::LifecycleProxy;
    use fidl_test_util::spawn_stream_handler;
    use fuchsia_component::server::{ServiceFs, ServiceObjLocal};
    use futures::channel::mpsc;
    use futures::lock::Mutex;
    use futures::{StreamExt, join};
    use runner::component::Controllable;
    use std::str::FromStr;
    use std::task::Poll;
    use zx::{self as zx, Task};
    use {
        fidl_fuchsia_component as fcomp, fidl_fuchsia_component_runner as fcrunner,
        fidl_fuchsia_data as fdata, fidl_fuchsia_io as fio, fuchsia_async as fasync,
    };

    pub enum MockServiceRequest {
        LogSink(LogSinkRequestStream),
    }

    pub type MockServiceFs<'a> = ServiceFs<ServiceObjLocal<'a, MockServiceRequest>>;

    /// Create a new local fs and install a mock LogSink service into it.
    /// Returns the created directory and corresponding namespace entries.
    pub fn create_fs_with_mock_logsink()
    -> Result<(MockServiceFs<'static>, Vec<fcrunner::ComponentNamespaceEntry>), Error> {
        let (dir_client, dir_server) = create_endpoints::<fio::DirectoryMarker>();

        let mut dir = ServiceFs::new_local();
        dir.add_fidl_service_at(LogSinkMarker::PROTOCOL_NAME, MockServiceRequest::LogSink);
        dir.serve_connection(dir_server).context("Failed to add serving channel.")?;

        let namespace = vec![fcrunner::ComponentNamespaceEntry {
            path: Some("/svc".to_string()),
            directory: Some(dir_client),
            ..Default::default()
        }];

        Ok((dir, namespace))
    }

    // Provide a UTC clock to avoid reusing the system UTC clock in tests, which may
    // limit the changes that are allowed to be made to this code. We create this clock
    // here, and start it from the current time.
    pub fn new_utc_clock_for_tests() -> Arc<UtcClock> {
        let reference_now = zx::BootInstant::get();
        let system_utc_clock = duplicate_utc_clock_handle(zx::Rights::SAME_RIGHTS).unwrap();
        let utc_now = system_utc_clock.read().unwrap();

        let utc_clock_for_tests =
            Arc::new(UtcClock::create(zx::ClockOpts::MAPPABLE, /*backstop=*/ None).unwrap());
        // This will start the test-only UTC clock.
        utc_clock_for_tests
            .update(zx::ClockUpdate::builder().absolute_value(reference_now, utc_now.into()))
            .unwrap();
        utc_clock_for_tests
    }

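    /// Creates an `ElfRunner` that launches processes via the built-in
    /// connector and uses the test-only UTC clock from
    /// `new_utc_clock_for_tests`.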
    pub fn new_elf_runner_for_test() -> Arc<ElfRunner> {
        Arc::new(ElfRunner::new(
            job_default().duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
            Box::new(process_launcher::BuiltInConnector {}),
            Some(new_utc_clock_for_tests()),
            CrashRecords::new(),
        ))
    }

    fn namespace_entry(path: &str, flags: fio::Flags) -> fcrunner::ComponentNamespaceEntry {
        // Get a handle to /pkg
        let ns_path = path.to_string();
        let ns_dir = fuchsia_fs::directory::open_in_namespace(path, flags).unwrap();
        let client_end = ns_dir.into_client_end().unwrap();
        fcrunner::ComponentNamespaceEntry {
            path: Some(ns_path),
            directory: Some(client_end),
            ..Default::default()
        }
    }

    fn pkg_dir_namespace_entry() -> fcrunner::ComponentNamespaceEntry {
        namespace_entry("/pkg", fio::PERM_READABLE | fio::PERM_EXECUTABLE)
    }

    fn svc_dir_namespace_entry() -> fcrunner::ComponentNamespaceEntry {
        namespace_entry("/svc", fio::PERM_READABLE)
    }

    fn hello_world_startinfo(
        runtime_dir: ServerEnd<fio::DirectoryMarker>,
    ) -> fcrunner::ComponentStartInfo {
        let ns = vec![pkg_dir_namespace_entry()];

        fcrunner::ComponentStartInfo {
            resolved_url: Some(
                "fuchsia-pkg://fuchsia.com/elf_runner_tests#meta/hello-world-rust.cm".to_string(),
            ),
            program: Some(fdata::Dictionary {
                entries: Some(vec![
                    fdata::DictionaryEntry {
                        key: "args".to_string(),
                        value: Some(Box::new(fdata::DictionaryValue::StrVec(vec![
                            "foo".to_string(),
                            "bar".to_string(),
                        ]))),
                    },
                    fdata::DictionaryEntry {
                        key: "binary".to_string(),
                        value: Some(Box::new(fdata::DictionaryValue::Str(
                            "bin/hello_world_rust".to_string(),
                        ))),
                    },
                ]),
                ..Default::default()
            }),
            ns: Some(ns),
            outgoing_dir: None,
            runtime_dir: Some(runtime_dir),
            component_instance: Some(zx::Event::create()),
            ..Default::default()
        }
    }

    /// ComponentStartInfo that points to a non-existent binary.
    fn invalid_binary_startinfo(
        runtime_dir: ServerEnd<fio::DirectoryMarker>,
    ) -> fcrunner::ComponentStartInfo {
        let ns = vec![pkg_dir_namespace_entry()];

        fcrunner::ComponentStartInfo {
            resolved_url: Some(
                "fuchsia-pkg://fuchsia.com/elf_runner_tests#meta/does-not-exist.cm".to_string(),
            ),
            program: Some(fdata::Dictionary {
                entries: Some(vec![fdata::DictionaryEntry {
                    key: "binary".to_string(),
                    value: Some(Box::new(fdata::DictionaryValue::Str(
                        "bin/does_not_exist".to_string(),
                    ))),
                }]),
                ..Default::default()
            }),
            ns: Some(ns),
            outgoing_dir: None,
            runtime_dir: Some(runtime_dir),
            component_instance: Some(zx::Event::create()),
            ..Default::default()
        }
    }

    /// Creates start info for a component which runs until told to exit. The
    /// ComponentController protocol can be used to stop the component when the
    /// test is done inspecting the launched component.
    pub fn lifecycle_startinfo(
        runtime_dir: ServerEnd<fio::DirectoryMarker>,
    ) -> fcrunner::ComponentStartInfo {
        let ns = vec![pkg_dir_namespace_entry()];

        fcrunner::ComponentStartInfo {
            resolved_url: Some(
                "fuchsia-pkg://fuchsia.com/lifecycle-example#meta/lifecycle.cm".to_string(),
            ),
            program: Some(fdata::Dictionary {
                entries: Some(vec![
                    fdata::DictionaryEntry {
                        key: "args".to_string(),
                        value: Some(Box::new(fdata::DictionaryValue::StrVec(vec![
                            "foo".to_string(),
                            "bar".to_string(),
                        ]))),
                    },
                    fdata::DictionaryEntry {
                        key: "binary".to_string(),
                        value: Some(Box::new(fdata::DictionaryValue::Str(
                            "bin/lifecycle_placeholder".to_string(),
                        ))),
                    },
                    fdata::DictionaryEntry {
                        key: "lifecycle.stop_event".to_string(),
                        value: Some(Box::new(fdata::DictionaryValue::Str("notify".to_string()))),
                    },
                ]),
                ..Default::default()
            }),
            ns: Some(ns),
            outgoing_dir: None,
            runtime_dir: Some(runtime_dir),
            component_instance: Some(zx::Event::create()),
            ..Default::default()
        }
    }

    fn create_child_process(job: &zx::Job, name: &str) -> zx::Process {
        let (process, _vmar) = job
            .create_child_process(zx::ProcessOptions::empty(), name.as_bytes())
            .expect("could not create process");
        process
    }

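    /// Creates a child job containing a single test process and wraps it in an
    /// `ElfComponent`, optionally with a lifecycle client and marked critical.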
    fn make_default_elf_component(
        lifecycle_client: Option<LifecycleProxy>,
        critical: bool,
    ) -> (scoped_task::Scoped<zx::Job>, ElfComponent) {
        let job = scoped_task::create_child_job().expect("failed to make child job");
        let process = create_child_process(&job, "test_process");
        let job_copy =
            job.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("job handle duplication failed");
        let component = ElfComponent::new(
            RuntimeDirectory::empty(),
            Moniker::default(),
            Job::Single(job_copy),
            process,
            lifecycle_client,
            critical,
            ExecutionScope::new(),
            "".to_string(),
            None,
            Default::default(),
            zx::Event::create(),
        );
        (job, component)
    }

    // TODO(https://fxbug.dev/42073224): A variation of this is used in a couple of places. We should consider
    // refactoring this into a test util file.
    async fn read_file<'a>(root_proxy: &'a fio::DirectoryProxy, path: &'a str) -> String {
        let file_proxy =
            fuchsia_fs::directory::open_file_async(&root_proxy, path, fuchsia_fs::PERM_READABLE)
                .expect("Failed to open file.");
        let res = fuchsia_fs::file::read_to_string(&file_proxy).await;
        res.expect("Unable to read file.")
    }

    #[fuchsia::test]
    async fn test_runtime_dir_entries() -> Result<(), Error> {
        let (runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
        let start_info = lifecycle_startinfo(runtime_dir_server);

        let runner = new_elf_runner_for_test();
        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
            Arc::new(SecurityPolicy::default()),
            Moniker::root(),
        ));
        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();

        runner.start(start_info, server_controller).await;

        // Verify that args are added to the runtime directory.
        assert_eq!("foo", read_file(&runtime_dir, "args/0").await);
        assert_eq!("bar", read_file(&runtime_dir, "args/1").await);

        // Process Id, Process Start Time, Job Id will vary with every run of this test. Here we
        // verify that they exist in the runtime directory, they can be parsed as integers,
        // they're greater than zero and they are not the same value. Those are about the only
        // invariants we can verify across test runs.
        let process_id = read_file(&runtime_dir, "elf/process_id").await.parse::<u64>()?;
        let process_start_time =
            read_file(&runtime_dir, "elf/process_start_time").await.parse::<i64>()?;
        let process_start_time_utc_estimate =
            read_file(&runtime_dir, "elf/process_start_time_utc_estimate").await;
        let job_id = read_file(&runtime_dir, "elf/job_id").await.parse::<u64>()?;
        assert!(process_id > 0);
        assert!(process_start_time > 0);
        assert!(process_start_time_utc_estimate.contains("UTC"));
        assert!(job_id > 0);
        assert_ne!(process_id, job_id);

        controller.stop().expect("Stop request failed");
        // Wait for the process to exit so the test doesn't pagefault due to an invalid stdout
        // handle.
        controller.on_closed().await.expect("failed waiting for channel to close");
        Ok(())
    }

    #[fuchsia::test]
    async fn test_kill_component() -> Result<(), Error> {
        let (job, mut component) = make_default_elf_component(None, false);

        let job_info = job.info()?;
        assert!(!job_info.exited);

        component.kill().await;

        let h = job.as_handle_ref();
        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
            .await
            .expect("failed waiting for termination signal");

        let job_info = job.info()?;
        assert!(job_info.exited);
        Ok(())
    }

    #[fuchsia::test]
    fn test_stop_critical_component() -> Result<(), Error> {
        let mut exec = fasync::TestExecutor::new();
        // Presence of the Lifecycle channel isn't used by ElfComponent to sense
        // component exit, but it does modify the stop behavior and this is
        // what we want to test.
        let (lifecycle_client, _lifecycle_server) = create_proxy::<LifecycleMarker>();
        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), true);
        let process = component.copy_process().unwrap();
        let job_info = job.info()?;
        assert!(!job_info.exited);

        // Ask the runner to stop the component; it returns a future which
        // completes when the component closes its side of the lifecycle
        // channel.
        let mut completes_when_stopped = component.stop();

        // The returned future shouldn't complete because we're holding the
        // lifecycle channel open.
        match exec.run_until_stalled(&mut completes_when_stopped) {
            Poll::Ready(_) => {
                panic!("runner should still be waiting for lifecycle channel to stop");
            }
            _ => {}
        }
        assert_eq!(process.kill(), Ok(()));

        exec.run_singlethreaded(&mut completes_when_stopped);

        // Check that the runner killed the job hosting the exited component.
        let h = job.as_handle_ref();
        let termination_fut = async move {
            fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
                .await
                .expect("failed waiting for termination signal");
        };
        exec.run_singlethreaded(termination_fut);

        let job_info = job.info()?;
        assert!(job_info.exited);
        Ok(())
    }

    #[fuchsia::test]
    fn test_stop_noncritical_component() -> Result<(), Error> {
        let mut exec = fasync::TestExecutor::new();
        // Presence of the Lifecycle channel isn't used by ElfComponent to sense
        // component exit, but it does modify the stop behavior and this is
        // what we want to test.
        let (lifecycle_client, lifecycle_server) = create_proxy::<LifecycleMarker>();
        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), false);

        let job_info = job.info()?;
        assert!(!job_info.exited);

        // Ask the runner to stop the component; it returns a future which
        // completes when the component closes its side of the lifecycle
        // channel.
        let mut completes_when_stopped = component.stop();

        // The returned future shouldn't complete because we're holding the
        // lifecycle channel open.
        match exec.run_until_stalled(&mut completes_when_stopped) {
            Poll::Ready(_) => {
                panic!("runner should still be waiting for lifecycle channel to stop");
            }
            _ => {}
        }
        drop(lifecycle_server);

        match exec.run_until_stalled(&mut completes_when_stopped) {
            Poll::Ready(_) => {}
            _ => {
                panic!("runner future should have completed, lifecycle channel is closed.");
            }
        }
        // Check that the runner killed the job hosting the exited component.
        let h = job.as_handle_ref();
        let termination_fut = async move {
            fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
                .await
                .expect("failed waiting for termination signal");
        };
        exec.run_singlethreaded(termination_fut);

        let job_info = job.info()?;
        assert!(job_info.exited);
        Ok(())
    }

    /// Stopping a component which doesn't have a lifecycle channel should be
    /// equivalent to killing a component directly.
    #[fuchsia::test]
    async fn test_stop_component_without_lifecycle() -> Result<(), Error> {
        let (job, mut component) = make_default_elf_component(None, false);

        let job_info = job.info()?;
        assert!(!job_info.exited);

        component.stop().await;

        let h = job.as_handle_ref();
        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
            .await
            .expect("failed waiting for termination signal");

        let job_info = job.info()?;
        assert!(job_info.exited);
        Ok(())
    }

    #[fuchsia::test]
    async fn test_stop_critical_component_with_closed_lifecycle() -> Result<(), Error> {
        let (lifecycle_client, lifecycle_server) = create_proxy::<LifecycleMarker>();
        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), true);
        let process = component.copy_process().unwrap();
        let job_info = job.info()?;
        assert!(!job_info.exited);

        // Close the lifecycle channel
        drop(lifecycle_server);
        // Kill the process because this is what ElfComponent monitors to
        // determine if the component exited.
        process.kill()?;
        component.stop().await;

        let h = job.as_handle_ref();
        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
            .await
            .expect("failed waiting for termination signal");

        let job_info = job.info()?;
        assert!(job_info.exited);
        Ok(())
    }

    #[fuchsia::test]
    async fn test_stop_noncritical_component_with_closed_lifecycle() -> Result<(), Error> {
        let (lifecycle_client, lifecycle_server) = create_proxy::<LifecycleMarker>();
        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), false);

        let job_info = job.info()?;
        assert!(!job_info.exited);

        // Close the lifecycle channel
        drop(lifecycle_server);
        // Kill the process because this is what ElfComponent monitors to
        // determine if the component exited.
        component.stop().await;

        let h = job.as_handle_ref();
        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
            .await
            .expect("failed waiting for termination signal");

        let job_info = job.info()?;
        assert!(job_info.exited);
        Ok(())
    }

    /// Dropping the component should kill the job hosting it.
    #[fuchsia::test]
    async fn test_drop() -> Result<(), Error> {
        let (job, component) = make_default_elf_component(None, false);

        let job_info = job.info()?;
        assert!(!job_info.exited);

        drop(component);

        let h = job.as_handle_ref();
        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
            .await
            .expect("failed waiting for termination signal");

        let job_info = job.info()?;
        assert!(job_info.exited);
        Ok(())
    }

    fn with_mark_vmo_exec(
        mut start_info: fcrunner::ComponentStartInfo,
    ) -> fcrunner::ComponentStartInfo {
        start_info.program.as_mut().map(|dict| {
            dict.entries.as_mut().map(|entry| {
                entry.push(fdata::DictionaryEntry {
                    key: "job_policy_ambient_mark_vmo_exec".to_string(),
                    value: Some(Box::new(fdata::DictionaryValue::Str("true".to_string()))),
                });
                entry
            })
        });
        start_info
    }

    fn with_main_process_critical(
        mut start_info: fcrunner::ComponentStartInfo,
    ) -> fcrunner::ComponentStartInfo {
        start_info.program.as_mut().map(|dict| {
            dict.entries.as_mut().map(|entry| {
                entry.push(fdata::DictionaryEntry {
                    key: "main_process_critical".to_string(),
                    value: Some(Box::new(fdata::DictionaryValue::Str("true".to_string()))),
                });
                entry
            })
        });
        start_info
    }

1296    #[fuchsia::test]
1297    async fn vmex_security_policy_denied() -> Result<(), Error> {
1298        let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1299        let start_info = with_mark_vmo_exec(lifecycle_startinfo(runtime_dir_server));
1300
1301        // Config does not allowlist any monikers to have access to the job policy.
1302        let runner = new_elf_runner_for_test();
1303        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1304            Arc::new(SecurityPolicy::default()),
1305            Moniker::root(),
1306        ));
1307        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1308
1309        // Attempting to start the component should fail, which we detect by looking for an
1310        // ACCESS_DENIED epitaph on the ComponentController's event stream.
1311        runner.start(start_info, server_controller).await;
1312        assert_matches!(
1313            controller.take_event_stream().try_next().await,
1314            Err(fidl::Error::ClientChannelClosed { status: zx::Status::ACCESS_DENIED, .. })
1315        );
1316
1317        Ok(())
1318    }
1319
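    /// Requesting `job_policy_ambient_mark_vmo_exec` from an allowlisted moniker
    /// should let the component start and run normally.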
1320    #[fuchsia::test]
1321    async fn vmex_security_policy_allowed() -> Result<(), Error> {
1322        let (runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1323        let start_info = with_mark_vmo_exec(lifecycle_startinfo(runtime_dir_server));
1324
1325        let policy = SecurityPolicy {
1326            job_policy: JobPolicyAllowlists {
1327                ambient_mark_vmo_exec: vec![AllowlistEntryBuilder::new().exact("foo").build()],
1328                ..Default::default()
1329            },
1330            ..Default::default()
1331        };
1332        let runner = new_elf_runner_for_test();
1333        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1334            Arc::new(policy),
1335            Moniker::try_from(["foo"]).unwrap(),
1336        ));
1337        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1338        runner.start(start_info, server_controller).await;
1339
1340        // Runtime dir won't exist if the component failed to start.
1341        let process_id = read_file(&runtime_dir, "elf/process_id").await.parse::<u64>()?;
1342        assert!(process_id > 0);
1343        // Component controller should get shut down normally; no ACCESS_DENIED epitaph.
1344        controller.kill().expect("kill failed");
1345
1346        // We expect a diagnostics event, then an OnStop event whose termination
1347        // status and exit code match those of a process that was killed, and
1348        // finally for the event stream to close.
1349        let mut event_stream = controller.take_event_stream();
1350        expect_diagnostics_event(&mut event_stream).await;
1351
1352        let s = zx::Status::from_raw(
1353            i32::try_from(fcomp::Error::InstanceDied.into_primitive()).unwrap(),
1354        );
1355        expect_on_stop(&mut event_stream, s, Some(zx::sys::ZX_TASK_RETCODE_SYSCALL_KILL)).await;
1356        expect_channel_closed(&mut event_stream).await;
1357        Ok(())
1358    }
1359
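    /// Requesting `main_process_critical` without an allowlisted moniker should fail
    /// with an `ACCESS_DENIED` epitaph.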
1360    #[fuchsia::test]
1361    async fn critical_security_policy_denied() -> Result<(), Error> {
1362        let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1363        let start_info = with_main_process_critical(hello_world_startinfo(runtime_dir_server));
1364
1365        // Default policy does not allowlist any monikers to be marked as critical
1366        let runner = new_elf_runner_for_test();
1367        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1368            Arc::new(SecurityPolicy::default()),
1369            Moniker::root(),
1370        ));
1371        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1372
1373        // Attempting to start the component should fail, which we detect by looking for an
1374        // ACCESS_DENIED epitaph on the ComponentController's event stream.
1375        runner.start(start_info, server_controller).await;
1376        assert_matches!(
1377            controller.take_event_stream().try_next().await,
1378            Err(fidl::Error::ClientChannelClosed { status: zx::Status::ACCESS_DENIED, .. })
1379        );
1380
1381        Ok(())
1382    }
1383
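    /// Failing to launch a component that is allowlisted as `main_process_critical`
    /// (here, because its binary does not exist) is expected to panic the runner.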
1384    #[fuchsia::test]
1385    #[should_panic]
1386    async fn fail_to_launch_critical_component() {
1387        let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1388
1389        // ElfRunner should fail to start the component because this start_info points
1390        // to a binary that does not exist in the test package.
1391        let start_info = with_main_process_critical(invalid_binary_startinfo(runtime_dir_server));
1392
1393        // Components cannot be marked main_process_critical unless their moniker is
1394        // allowlisted, so allow this one.
1395        let policy = SecurityPolicy {
1396            job_policy: JobPolicyAllowlists {
1397                main_process_critical: vec![AllowlistEntryBuilder::new().build()],
1398                ..Default::default()
1399            },
1400            ..Default::default()
1401        };
1402        let runner = new_elf_runner_for_test();
1403        let runner =
1404            runner.get_scoped_runner(ScopedPolicyChecker::new(Arc::new(policy), Moniker::root()));
1405        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1406
1407        runner.start(start_info, server_controller).await;
1408
1409        controller
1410            .take_event_stream()
1411            .try_next()
1412            .await
1413            .map(|_: Option<fcrunner::ComponentControllerEvent>| ()) // Discard.
1414            .unwrap_or_else(|error| warn!(error:%; "error reading from event stream"));
1415    }
1416
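    /// Returns start info for the hello-world-rust component with both stdout and
    /// stderr forwarded to the log, appending the package directory to `ns`.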
1417    fn hello_world_startinfo_forward_stdout_to_log(
1418        runtime_dir: ServerEnd<fio::DirectoryMarker>,
1419        mut ns: Vec<fcrunner::ComponentNamespaceEntry>,
1420    ) -> fcrunner::ComponentStartInfo {
1421        ns.push(pkg_dir_namespace_entry());
1422
1423        fcrunner::ComponentStartInfo {
1424            resolved_url: Some(
1425                "fuchsia-pkg://fuchsia.com/hello-world-rust#meta/hello-world-rust.cm".to_string(),
1426            ),
1427            program: Some(fdata::Dictionary {
1428                entries: Some(vec![
1429                    fdata::DictionaryEntry {
1430                        key: "binary".to_string(),
1431                        value: Some(Box::new(fdata::DictionaryValue::Str(
1432                            "bin/hello_world_rust".to_string(),
1433                        ))),
1434                    },
1435                    fdata::DictionaryEntry {
1436                        key: "forward_stdout_to".to_string(),
1437                        value: Some(Box::new(fdata::DictionaryValue::Str("log".to_string()))),
1438                    },
1439                    fdata::DictionaryEntry {
1440                        key: "forward_stderr_to".to_string(),
1441                        value: Some(Box::new(fdata::DictionaryValue::Str("log".to_string()))),
1442                    },
1443                ]),
1444                ..Default::default()
1445            }),
1446            ns: Some(ns),
1447            outgoing_dir: None,
1448            runtime_dir: Some(runtime_dir),
1449            component_instance: Some(zx::Event::create()),
1450            ..Default::default()
1451        }
1452    }
1453
1454    // TODO(https://fxbug.dev/42148789): The following function shares a lot of code with
1455    // the tests in //src/sys/component_manager/src/model/namespace.rs. Shared
1456    // functionality should be refactored into a common test utility library.
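    /// Starting a component that forwards stdout and stderr to the log should result
    /// in exactly one connection to the mock `LogSink` service.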
1457    #[fuchsia::test]
1458    async fn enable_stdout_and_stderr_logging() -> Result<(), Error> {
1459        let (mut dir, ns) = create_fs_with_mock_logsink()?;
1460
1461        let run_component_fut = async move {
1462            let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1463            let start_info = hello_world_startinfo_forward_stdout_to_log(runtime_dir_server, ns);
1464
1465            let runner = new_elf_runner_for_test();
1466            let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1467                Arc::new(SecurityPolicy::default()),
1468                Moniker::root(),
1469            ));
1470            let (client_controller, server_controller) =
1471                create_proxy::<fcrunner::ComponentControllerMarker>();
1472
1473            runner.start(start_info, server_controller).await;
1474            let mut event_stream = client_controller.take_event_stream();
1475            expect_diagnostics_event(&mut event_stream).await;
1476            expect_on_stop(&mut event_stream, zx::Status::OK, Some(0)).await;
1477            expect_channel_closed(&mut event_stream).await;
1478        };
1479
1480        // Just check the connection count; other integration tests cover decoding the actual logs.
1481        let service_fs_listener_fut = async {
1482            let mut requests = Vec::new();
1483            while let Some(MockServiceRequest::LogSink(r)) = dir.next().await {
1484                // The client is expecting us to send OnInit, but we're not testing that, so just
1485                // park the requests.
1486                requests.push(r);
1487            }
1488            requests.len()
1489        };
1490
1491        let connection_count = join!(run_component_fut, service_fs_listener_fut).1;
1492
1493        assert_eq!(connection_count, 1);
1494        Ok(())
1495    }
1496
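    /// The `OnPublishDiagnostics` event should carry a job handle whose koid matches
    /// the `elf/job_id` entry published in the runtime directory.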
1497    #[fuchsia::test]
1498    async fn on_publish_diagnostics_contains_job_handle() -> Result<(), Error> {
1499        let (runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1500        let start_info = lifecycle_startinfo(runtime_dir_server);
1501
1502        let runner = new_elf_runner_for_test();
1503        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1504            Arc::new(SecurityPolicy::default()),
1505            Moniker::root(),
1506        ));
1507        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1508
1509        runner.start(start_info, server_controller).await;
1510
1511        let job_id = read_file(&runtime_dir, "elf/job_id").await.parse::<u64>().unwrap();
1512        let mut event_stream = controller.take_event_stream();
1513        match event_stream.try_next().await {
1514            Ok(Some(fcrunner::ComponentControllerEvent::OnPublishDiagnostics {
1515                payload:
1516                    ComponentDiagnostics {
1517                        tasks:
1518                            Some(ComponentTasks {
1519                                component_task: Some(DiagnosticsTask::Job(job)), ..
1520                            }),
1521                        ..
1522                    },
1523            })) => {
1524                assert_eq!(job_id, job.get_koid().unwrap().raw_koid());
1525            }
1526            other => panic!("unexpected event result: {:?}", other),
1527        }
1528
1529        controller.stop().expect("Stop request failed");
1530        // Wait for the process to exit so the test doesn't pagefault due to an invalid stdout
1531        // handle.
1532        controller.on_closed().await.expect("failed waiting for channel to close");
1533
1534        Ok(())
1535    }
1536
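    /// Asserts that the next controller event is `OnPublishDiagnostics` and that it
    /// carries a job as the component task.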
1537    async fn expect_diagnostics_event(event_stream: &mut fcrunner::ComponentControllerEventStream) {
1538        let event = event_stream.try_next().await;
1539        assert_matches!(
1540            event,
1541            Ok(Some(fcrunner::ComponentControllerEvent::OnPublishDiagnostics {
1542                payload: ComponentDiagnostics {
1543                    tasks: Some(ComponentTasks {
1544                        component_task: Some(DiagnosticsTask::Job(_)),
1545                        ..
1546                    }),
1547                    ..
1548                },
1549            }))
1550        );
1551    }
1552
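    /// Asserts that the next controller event is `OnStop` with the expected
    /// termination status and exit code.
    ///
    /// A minimal usage sketch of these event-stream helpers, mirroring the sequence
    /// the tests in this module drive (assumes `controller` is a
    /// `ComponentControllerProxy` for a component that exits cleanly):
    ///
    /// ```ignore
    /// let mut events = controller.take_event_stream();
    /// expect_diagnostics_event(&mut events).await;
    /// expect_on_stop(&mut events, zx::Status::OK, Some(0)).await;
    /// expect_channel_closed(&mut events).await;
    /// ```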
1553    async fn expect_on_stop(
1554        event_stream: &mut fcrunner::ComponentControllerEventStream,
1555        expected_status: zx::Status,
1556        expected_exit_code: Option<i64>,
1557    ) {
1558        let event = event_stream.try_next().await;
1559        assert_matches!(
1560            event,
1561            Ok(Some(fcrunner::ComponentControllerEvent::OnStop {
1562                payload: fcrunner::ComponentStopInfo { termination_status: Some(s), exit_code, .. },
1563            }))
1564            if s == expected_status.into_raw() &&
1565                exit_code == expected_exit_code
1566        );
1567    }
1568
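    /// Asserts that the event stream has ended without an error, i.e. the controller
    /// channel was closed cleanly.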
1569    async fn expect_channel_closed(event_stream: &mut fcrunner::ComponentControllerEventStream) {
1570        let event = event_stream.try_next().await;
1571        match event {
1572            Ok(None) => {}
1573            other => panic!("Expected channel closed error, got {:?}", other),
1574        }
1575    }
1576
1577    /// A test launcher connector whose `fuchsia.process.Launcher` implementation sends the
1578    /// complete launch request payload back to the test through an mpsc channel.
1579    struct LauncherConnectorForTest {
1580        sender: mpsc::UnboundedSender<LaunchPayload>,
1581    }
1582
1583    /// Contains all the information passed to `fuchsia.process.Launcher` up to and
1584    /// including the call to `Launch`/`CreateWithoutStarting`.
1585    #[derive(Default)]
1586    struct LaunchPayload {
1587        launch_info: Option<fproc::LaunchInfo>,
1588        args: Vec<Vec<u8>>,
1589        environ: Vec<Vec<u8>>,
1590        name_info: Vec<fproc::NameInfo>,
1591        handles: Vec<fproc::HandleInfo>,
1592        options: u32,
1593    }
1594
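    // The connector spawns a `fuchsia.process.Launcher` stream handler that accumulates
    // AddArgs/AddEnvirons/AddNames/AddHandles/SetOptions calls into a `LaunchPayload`
    // and sends the completed payload over the mpsc channel once Launch is called.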
1595    impl Connect for LauncherConnectorForTest {
1596        type Proxy = fproc::LauncherProxy;
1597
1598        fn connect(&self) -> Result<Self::Proxy, anyhow::Error> {
1599            let sender = self.sender.clone();
1600            let payload = Arc::new(Mutex::new(LaunchPayload::default()));
1601
1602            Ok(spawn_stream_handler(move |launcher_request| {
1603                let sender = sender.clone();
1604                let payload = payload.clone();
1605                async move {
1606                    let mut payload = payload.lock().await;
1607                    match launcher_request {
1608                        fproc::LauncherRequest::Launch { info, responder } => {
1609                            let process = create_child_process(&info.job, "test_process");
1610                            responder.send(zx::Status::OK.into_raw(), Some(process)).unwrap();
1611
1612                            let mut payload =
1613                                std::mem::replace(&mut *payload, LaunchPayload::default());
1614                            payload.launch_info = Some(info);
1615                            sender.unbounded_send(payload).unwrap();
1616                        }
1617                        fproc::LauncherRequest::CreateWithoutStarting { info: _, responder: _ } => {
1618                            unimplemented!()
1619                        }
1620                        fproc::LauncherRequest::AddArgs { mut args, control_handle: _ } => {
1621                            payload.args.append(&mut args);
1622                        }
1623                        fproc::LauncherRequest::AddEnvirons { mut environ, control_handle: _ } => {
1624                            payload.environ.append(&mut environ);
1625                        }
1626                        fproc::LauncherRequest::AddNames { mut names, control_handle: _ } => {
1627                            payload.name_info.append(&mut names);
1628                        }
1629                        fproc::LauncherRequest::AddHandles { mut handles, control_handle: _ } => {
1630                            payload.handles.append(&mut handles);
1631                        }
1632                        fproc::LauncherRequest::SetOptions { options, .. } => {
1633                            payload.options = options;
1634                        }
1635                    }
1636                }
1637            }))
1638        }
1639    }
1640
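    /// A UTC clock passed to the runner through `numbered_handles` should be forwarded
    /// to `fuchsia.process.Launcher` when the process is created.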
1641    #[fuchsia::test]
1642    async fn process_created_with_utc_clock_from_numbered_handles() -> Result<(), Error> {
1643        let (payload_tx, mut payload_rx) = mpsc::unbounded();
1644
1645        let connector = LauncherConnectorForTest { sender: payload_tx };
1646        let runner = ElfRunner::new(
1647            job_default().duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
1648            Box::new(connector),
1649            Some(new_utc_clock_for_tests()),
1650            CrashRecords::new(),
1651        );
1652        let policy_checker = ScopedPolicyChecker::new(
1653            Arc::new(SecurityPolicy::default()),
1654            Moniker::try_from(["foo"]).unwrap(),
1655        );
1656
1657        // Create a clock and pass it to the component as the UTC clock through numbered_handles.
1658        let clock = zx::SyntheticClock::create(
1659            zx::ClockOpts::AUTO_START | zx::ClockOpts::MONOTONIC | zx::ClockOpts::MAPPABLE,
1660            None,
1661        )?;
1662        let clock_koid = clock.get_koid().unwrap();
1663
1664        let (_runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1665        let mut start_info = hello_world_startinfo(runtime_dir_server);
1666        start_info.numbered_handles = Some(vec![fproc::HandleInfo {
1667            handle: clock.into_handle(),
1668            id: HandleInfo::new(HandleType::ClockUtc, 0).as_raw(),
1669        }]);
1670
1671        // Start the component.
1672        let _ = runner
1673            .start_component(start_info, &policy_checker)
1674            .await
1675            .context("failed to start component")?;
1676
1677        let payload = payload_rx.next().await.unwrap();
1678        assert!(
1679            payload
1680                .handles
1681                .iter()
1682                .any(|handle_info| handle_info.handle.get_koid().unwrap() == clock_koid)
1683        );
1684
1685        Ok(())
1686    }
1687
1688    /// Test visiting running components using [`ComponentSet`].
1689    #[fuchsia::test]
1690    async fn test_enumerate_components() {
1691        use std::sync::atomic::{AtomicUsize, Ordering};
1692
1693        let (_runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1694        let start_info = lifecycle_startinfo(runtime_dir_server);
1695
1696        let runner = new_elf_runner_for_test();
1697        let components = runner.components.clone();
1698
1699        // Initially there are zero components.
1700        let count = Arc::new(AtomicUsize::new(0));
1701        components.clone().visit(|_, _| {
1702            count.fetch_add(1, Ordering::SeqCst);
1703        });
1704        assert_eq!(count.load(Ordering::SeqCst), 0);
1705
1706        // Run a component.
1707        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1708            Arc::new(SecurityPolicy::default()),
1709            Moniker::root(),
1710        ));
1711        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1712        runner.start(start_info, server_controller).await;
1713
1714        // There should now be one component in the set.
1715        let count = Arc::new(AtomicUsize::new(0));
1716        components.clone().visit(|elf_component: &ElfComponentInfo, _| {
1717            assert_eq!(
1718                elf_component.get_url().as_str(),
1719                "fuchsia-pkg://fuchsia.com/lifecycle-example#meta/lifecycle.cm"
1720            );
1721            count.fetch_add(1, Ordering::SeqCst);
1722        });
1723        assert_eq!(count.load(Ordering::SeqCst), 1);
1724
1725        // Stop the component.
1726        controller.stop().unwrap();
1727        controller.on_closed().await.unwrap();
1728
1729        // There should now be zero components in the set.
1730        // Keep retrying until the component is asynchronously deregistered.
1731        loop {
1732            let count = Arc::new(AtomicUsize::new(0));
1733            components.clone().visit(|_, _| {
1734                count.fetch_add(1, Ordering::SeqCst);
1735            });
1736            let count = count.load(Ordering::SeqCst);
1737            assert!(count == 0 || count == 1);
1738            if count == 0 {
1739                break;
1740            }
1741            // Yield to the executor once so that we don't starve the asynchronous
1742            // deregistration task.
1743            yield_to_executor().await;
1744        }
1745    }
1746
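    /// Returns `Pending` exactly once (after waking itself) so that other tasks on the
    /// executor get a chance to run before this future completes.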
1747    async fn yield_to_executor() {
1748        let mut done = false;
1749        futures::future::poll_fn(|cx| {
1750            if done {
1751                Poll::Ready(())
1752            } else {
1753                done = true;
1754                cx.waker().wake_by_ref();
1755                Poll::Pending
1756            }
1757        })
1758        .await;
1759    }
1760
1761    /// Creates start info for a component that, when run, immediately escrows its
1762    /// outgoing directory and then exits.
1763    pub fn immediate_escrow_startinfo(
1764        outgoing_dir: ServerEnd<fio::DirectoryMarker>,
1765        runtime_dir: ServerEnd<fio::DirectoryMarker>,
1766    ) -> fcrunner::ComponentStartInfo {
1767        let ns = vec![
1768            pkg_dir_namespace_entry(),
1769            // Give the test component LogSink.
1770            svc_dir_namespace_entry(),
1771        ];
1772
1773        fcrunner::ComponentStartInfo {
1774            resolved_url: Some("#meta/immediate_escrow_component.cm".to_string()),
1775            program: Some(fdata::Dictionary {
1776                entries: Some(vec![
1777                    fdata::DictionaryEntry {
1778                        key: "binary".to_string(),
1779                        value: Some(Box::new(fdata::DictionaryValue::Str(
1780                            "bin/immediate_escrow".to_string(),
1781                        ))),
1782                    },
1783                    fdata::DictionaryEntry {
1784                        key: "lifecycle.stop_event".to_string(),
1785                        value: Some(Box::new(fdata::DictionaryValue::Str("notify".to_string()))),
1786                    },
1787                ]),
1788                ..Default::default()
1789            }),
1790            ns: Some(ns),
1791            outgoing_dir: Some(outgoing_dir),
1792            runtime_dir: Some(runtime_dir),
1793            component_instance: Some(zx::Event::create()),
1794            ..Default::default()
1795        }
1796    }
1797
1798    /// Test that an ELF component can send an `OnEscrow` event on its lifecycle
1799    /// channel and that the event is forwarded to the `ComponentController`.
1800    #[fuchsia::test]
1801    async fn test_lifecycle_on_escrow() {
1802        let (outgoing_dir_client, outgoing_dir_server) =
1803            fidl::endpoints::create_endpoints::<fio::DirectoryMarker>();
1804        let (_, runtime_dir_server) = fidl::endpoints::create_endpoints::<fio::DirectoryMarker>();
1805        let start_info = immediate_escrow_startinfo(outgoing_dir_server, runtime_dir_server);
1806
1807        let runner = new_elf_runner_for_test();
1808        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1809            Arc::new(SecurityPolicy::default()),
1810            Moniker::root(),
1811        ));
1812        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1813
1814        runner.start(start_info, server_controller).await;
1815
1816        let mut event_stream = controller.take_event_stream();
1817
1818        expect_diagnostics_event(&mut event_stream).await;
1819
1820        match event_stream.try_next().await {
1821            Ok(Some(fcrunner::ComponentControllerEvent::OnEscrow {
1822                payload: fcrunner::ComponentControllerOnEscrowRequest { outgoing_dir, .. },
1823            })) => {
1824                let outgoing_dir_server = outgoing_dir.unwrap();
1825
1826                assert_eq!(
1827                    outgoing_dir_client.basic_info().unwrap().koid,
1828                    outgoing_dir_server.basic_info().unwrap().related_koid
1829                );
1830            }
1831            other => panic!("unexpected event result: {:?}", other),
1832        }
1833
1834        expect_on_stop(&mut event_stream, zx::Status::OK, Some(0)).await;
1835        expect_channel_closed(&mut event_stream).await;
1836    }
1837
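    /// Returns start info for the exit-with-code test component, passing `exit_code`
    /// as its single program argument.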
1838    fn exit_with_code_startinfo(exit_code: i64) -> fcrunner::ComponentStartInfo {
1839        let (_runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1840        let ns = vec![pkg_dir_namespace_entry()];
1841
1842        fcrunner::ComponentStartInfo {
1843            resolved_url: Some(
1844                "fuchsia-pkg://fuchsia.com/elf_runner_tests#meta/exit-with-code.cm".to_string(),
1845            ),
1846            program: Some(fdata::Dictionary {
1847                entries: Some(vec![
1848                    fdata::DictionaryEntry {
1849                        key: "args".to_string(),
1850                        value: Some(Box::new(fdata::DictionaryValue::StrVec(vec![format!(
1851                            "{}",
1852                            exit_code
1853                        )]))),
1854                    },
1855                    fdata::DictionaryEntry {
1856                        key: "binary".to_string(),
1857                        value: Some(Box::new(fdata::DictionaryValue::Str(
1858                            "bin/exit_with_code".to_string(),
1859                        ))),
1860                    },
1861                ]),
1862                ..Default::default()
1863            }),
1864            ns: Some(ns),
1865            outgoing_dir: None,
1866            runtime_dir: Some(runtime_dir_server),
1867            component_instance: Some(zx::Event::create()),
1868            ..Default::default()
1869        }
1870    }
1871
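    /// A component that exits with code 0 should stop with `OK` status and exit code 0.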
1872    #[fuchsia::test]
1873    async fn test_return_code_success() {
1874        let start_info = exit_with_code_startinfo(0);
1875
1876        let runner = new_elf_runner_for_test();
1877        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1878            Arc::new(SecurityPolicy::default()),
1879            Moniker::root(),
1880        ));
1881        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1882        runner.start(start_info, server_controller).await;
1883
1884        let mut event_stream = controller.take_event_stream();
1885        expect_diagnostics_event(&mut event_stream).await;
1886        expect_on_stop(&mut event_stream, zx::Status::OK, Some(0)).await;
1887        expect_channel_closed(&mut event_stream).await;
1888    }
1889
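    /// A component that exits with a non-zero code should stop with an `InstanceDied`
    /// status and report that exit code.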
1890    #[fuchsia::test]
1891    async fn test_return_code_failure() {
1892        let start_info = exit_with_code_startinfo(123);
1893
1894        let runner = new_elf_runner_for_test();
1895        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1896            Arc::new(SecurityPolicy::default()),
1897            Moniker::root(),
1898        ));
1899        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1900        runner.start(start_info, server_controller).await;
1901
1902        let mut event_stream = controller.take_event_stream();
1903        expect_diagnostics_event(&mut event_stream).await;
1904        let s = zx::Status::from_raw(
1905            i32::try_from(fcomp::Error::InstanceDied.into_primitive()).unwrap(),
1906        );
1907        expect_on_stop(&mut event_stream, s, Some(123)).await;
1908        expect_channel_closed(&mut event_stream).await;
1909    }
1910
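    /// Exit code 255 should be acceptable only for the allowlisted sshd shell moniker,
    /// and not for other codes or other monikers.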
1911    #[fuchsia::test]
1912    fn test_is_acceptable_exit_code() {
1913        // Test sshd with its acceptable code
1914        assert!(is_acceptable_exit_code(
1915            &Moniker::from_str("core/sshd-host/shell:sshd-1").expect("valid moniker"),
1916            255
1917        ));
1918
1919        // Test sshd with a non-acceptable code
1920        assert!(!is_acceptable_exit_code(
1921            &Moniker::from_str("core/sshd-host/shell:sshd-1").expect("valid moniker"),
1922            1
1923        ));
1924
1925        // Test a moniker that doesn't match the allowlisted sshd pattern
1926        assert!(!is_acceptable_exit_code(
1927            &Moniker::from_str("not_core/ssh-host/shell:sshd-1").expect("valid moniker"),
1928            255
1929        ));
1930
1931        // Test an unknown component with a code that happens to be acceptable for another component
1932        assert!(!is_acceptable_exit_code(
1933            &Moniker::from_str("foo/debug").expect("valid moniker"),
1934            255
1935        ));
1936    }
1937}