proptest/test_runner/runner.rs

//-
// Copyright 2017, 2018, 2019 The proptest developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use crate::std_facade::{Arc, BTreeMap, Box, String, Vec};
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering::SeqCst;
use core::{fmt, iter};
#[cfg(feature = "std")]
use std::panic::{self, AssertUnwindSafe};

#[cfg(feature = "fork")]
use rusty_fork;
#[cfg(feature = "fork")]
use std::cell::{Cell, RefCell};
#[cfg(feature = "fork")]
use std::env;
#[cfg(feature = "fork")]
use std::fs;
#[cfg(feature = "fork")]
use tempfile;

use crate::strategy::*;
use crate::test_runner::config::*;
use crate::test_runner::errors::*;
use crate::test_runner::failure_persistence::PersistedSeed;
use crate::test_runner::reason::*;
#[cfg(feature = "fork")]
use crate::test_runner::replay;
use crate::test_runner::result_cache::*;
use crate::test_runner::rng::TestRng;

#[cfg(feature = "fork")]
const ENV_FORK_FILE: &'static str = "_PROPTEST_FORKFILE";

const ALWAYS: u32 = 0;
/// Verbose level 1 to show failures. In state machine tests this level is used
/// to print transitions.
pub const INFO_LOG: u32 = 1;
const TRACE: u32 = 2;

#[cfg(feature = "std")]
macro_rules! verbose_message {
    ($runner:expr, $level:expr, $fmt:tt $($arg:tt)*) => { {
        #[allow(unused_comparisons)]
        {
            if $runner.config.verbose >= $level {
                eprintln!(concat!("proptest: ", $fmt) $($arg)*);
            }
        };
        ()
    } }
}

#[cfg(not(feature = "std"))]
macro_rules! verbose_message {
    ($runner:expr, $level:expr, $fmt:tt $($arg:tt)*) => {
        let _ = $level;
    };
}

type RejectionDetail = BTreeMap<Reason, u32>;

/// State used when running a proptest test.
#[derive(Clone)]
pub struct TestRunner {
    config: Config,
    successes: u32,
    local_rejects: u32,
    global_rejects: u32,
    rng: TestRng,
    flat_map_regens: Arc<AtomicUsize>,

    local_reject_detail: RejectionDetail,
    global_reject_detail: RejectionDetail,
}

impl fmt::Debug for TestRunner {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TestRunner")
            .field("config", &self.config)
            .field("successes", &self.successes)
            .field("local_rejects", &self.local_rejects)
            .field("global_rejects", &self.global_rejects)
            .field("rng", &"<TestRng>")
            .field("flat_map_regens", &self.flat_map_regens)
            .field("local_reject_detail", &self.local_reject_detail)
            .field("global_reject_detail", &self.global_reject_detail)
            .finish()
    }
}

impl fmt::Display for TestRunner {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "\tsuccesses: {}\n\
             \tlocal rejects: {}\n",
            self.successes, self.local_rejects
        )?;
        for (whence, count) in &self.local_reject_detail {
            writeln!(f, "\t\t{} times at {}", count, whence)?;
        }
        writeln!(f, "\tglobal rejects: {}", self.global_rejects)?;
        for (whence, count) in &self.global_reject_detail {
            writeln!(f, "\t\t{} times at {}", count, whence)?;
        }

        Ok(())
    }
}

/// Equivalent to: `TestRunner::new(Config::default())`.
impl Default for TestRunner {
    fn default() -> Self {
        Self::new(Config::default())
    }
}

#[cfg(feature = "fork")]
#[derive(Debug)]
struct ForkOutput {
    file: Option<fs::File>,
}

#[cfg(feature = "fork")]
impl ForkOutput {
    fn append(&mut self, result: &TestCaseResult) {
        if let Some(ref mut file) = self.file {
            replay::append(file, result)
                .expect("Failed to append to replay file");
        }
    }

    fn ping(&mut self) {
        if let Some(ref mut file) = self.file {
            replay::ping(file).expect("Failed to append to replay file");
        }
    }

    fn terminate(&mut self) {
        if let Some(ref mut file) = self.file {
            replay::terminate(file).expect("Failed to append to replay file");
        }
    }

    fn empty() -> Self {
        ForkOutput { file: None }
    }

    fn is_in_fork(&self) -> bool {
        self.file.is_some()
    }
}

#[cfg(not(feature = "fork"))]
#[derive(Debug)]
struct ForkOutput;

#[cfg(not(feature = "fork"))]
impl ForkOutput {
    fn append(&mut self, _result: &TestCaseResult) {}
    fn ping(&mut self) {}
    fn terminate(&mut self) {}
    fn empty() -> Self {
        ForkOutput
    }
    fn is_in_fork(&self) -> bool {
        false
    }
}

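// Runs a single test case when `std` is not available: the fork-replay
// iterator and the result cache are consulted first; otherwise the test
// closure is invoked directly and its outcome is cached. Unlike the `std`
// variant below, there is no panic catching, timeout enforcement, or verbose
// logging.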
#[cfg(not(feature = "std"))]
fn call_test<V, F, R>(
    _runner: &mut TestRunner,
    case: V,
    test: &F,
    replay_from_fork: &mut R,
    result_cache: &mut dyn ResultCache,
    _: &mut ForkOutput,
    is_from_persisted_seed: bool,
) -> TestCaseResultV2
where
    V: fmt::Debug,
    F: Fn(V) -> TestCaseResult,
    R: Iterator<Item = TestCaseResult>,
{
    if let Some(result) = replay_from_fork.next() {
        return result.map(|_| TestCaseOk::ReplayFromForkSuccess);
    }

    let cache_key = result_cache.key(&ResultCacheKey::new(&case));
    if let Some(result) = result_cache.get(cache_key) {
        return result.clone().map(|_| TestCaseOk::CacheHitSuccess);
    }

    let result = test(case);
    result_cache.put(cache_key, &result);
    result.map(|_| {
        if is_from_persisted_seed {
            TestCaseOk::PersistedCaseSuccess
        } else {
            TestCaseOk::NewCaseSuccess
        }
    })
}

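// Runs a single test case with full `std` support. In addition to the replay
// and cache handling above, this variant pings the fork replay file, catches
// panics from the test closure and converts them into `TestCaseError::Fail`,
// and fails cases whose execution exceeded the configured `timeout`.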
#[cfg(feature = "std")]
fn call_test<V, F, R>(
    runner: &mut TestRunner,
    case: V,
    test: &F,
    replay_from_fork: &mut R,
    result_cache: &mut dyn ResultCache,
    fork_output: &mut ForkOutput,
    is_from_persisted_seed: bool,
) -> TestCaseResultV2
where
    V: fmt::Debug,
    F: Fn(V) -> TestCaseResult,
    R: Iterator<Item = TestCaseResult>,
{
    use std::time;

    let timeout = runner.config.timeout();

    if let Some(result) = replay_from_fork.next() {
        return result.map(|_| TestCaseOk::ReplayFromForkSuccess);
    }

    // Now that we're about to start a new test (as far as the replay system is
    // concerned), ping the replay file so the parent process can determine
    // that we made it this far.
    fork_output.ping();

    verbose_message!(runner, TRACE, "Next test input: {:?}", case);

    let cache_key = result_cache.key(&ResultCacheKey::new(&case));
    if let Some(result) = result_cache.get(cache_key) {
        verbose_message!(
            runner,
            TRACE,
            "Test input hit cache, skipping execution"
        );
        return result.clone().map(|_| TestCaseOk::CacheHitSuccess);
    }

    let time_start = time::Instant::now();

    let mut result = unwrap_or!(
        panic::catch_unwind(AssertUnwindSafe(|| test(case))),
        what => Err(TestCaseError::Fail(
            what.downcast::<&'static str>().map(|s| (*s).into())
                .or_else(|what| what.downcast::<String>().map(|b| (*b).into()))
                .or_else(|what| what.downcast::<Box<str>>().map(|b| (*b).into()))
                .unwrap_or_else(|_| "<unknown panic value>".into()))));

    // If there is a timeout and we exceeded it, fail the test here so we get
    // consistent behaviour. (The parent process cannot precisely time the test
    // cases itself.)
    if timeout > 0 && result.is_ok() {
        let elapsed = time_start.elapsed();
        let elapsed_millis = elapsed.as_secs() as u32 * 1000
            + elapsed.subsec_nanos() / 1_000_000;

        if elapsed_millis > timeout {
            result = Err(TestCaseError::fail(format!(
                "Timeout of {} ms exceeded: test took {} ms",
                timeout, elapsed_millis
            )));
        }
    }

    result_cache.put(cache_key, &result);
    fork_output.append(&result);

    match result {
        Ok(()) => verbose_message!(runner, TRACE, "Test case passed"),
        Err(TestCaseError::Reject(ref reason)) => {
            verbose_message!(runner, INFO_LOG, "Test case rejected: {}", reason)
        }
        Err(TestCaseError::Fail(ref reason)) => {
            verbose_message!(runner, INFO_LOG, "Test case failed: {}", reason)
        }
    }

    result.map(|_| {
        if is_from_persisted_seed {
            TestCaseOk::PersistedCaseSuccess
        } else {
            TestCaseOk::NewCaseSuccess
        }
    })
}

type TestRunResult<S> = Result<(), TestError<<S as Strategy>::Value>>;

impl TestRunner {
    /// Create a fresh `TestRunner` with the given configuration.
    ///
    /// The runner will use an RNG with a generated seed and the default
    /// algorithm.
    ///
    /// In `no_std` environments, every `TestRunner` will use the same
    /// hard-coded seed. This seed is not contractually guaranteed and may be
    /// changed between releases without notice.
    pub fn new(config: Config) -> Self {
        let algorithm = config.rng_algorithm;
        TestRunner::new_with_rng(config, TestRng::default_rng(algorithm))
    }

    /// Create a fresh `TestRunner` with the standard deterministic RNG.
    ///
    /// This is sugar for the following:
    ///
    /// ```rust
    /// # use proptest::test_runner::*;
    /// let config = Config::default();
    /// let algorithm = config.rng_algorithm;
    /// TestRunner::new_with_rng(
    ///     config,
    ///     TestRng::deterministic_rng(algorithm));
    /// ```
    ///
    /// Refer to `TestRng::deterministic_rng()` for more information on the
    /// properties of the RNG used here.
    pub fn deterministic() -> Self {
        let config = Config::default();
        let algorithm = config.rng_algorithm;
        TestRunner::new_with_rng(config, TestRng::deterministic_rng(algorithm))
    }

    /// Create a fresh `TestRunner` with the given configuration and RNG.
    pub fn new_with_rng(config: Config, rng: TestRng) -> Self {
        TestRunner {
            config,
            successes: 0,
            local_rejects: 0,
            global_rejects: 0,
            rng,
            flat_map_regens: Arc::new(AtomicUsize::new(0)),
            local_reject_detail: BTreeMap::new(),
            global_reject_detail: BTreeMap::new(),
        }
    }

    /// Create a fresh `TestRunner` with the same config and global counters as
    /// this one, but with local state reset and an independent `Rng` (but
    /// deterministic).
    pub(crate) fn partial_clone(&mut self) -> Self {
        TestRunner {
            config: self.config.clone(),
            successes: 0,
            local_rejects: 0,
            global_rejects: 0,
            rng: self.new_rng(),
            flat_map_regens: Arc::clone(&self.flat_map_regens),
            local_reject_detail: BTreeMap::new(),
            global_reject_detail: BTreeMap::new(),
        }
    }

    /// Returns the RNG for this test run.
    pub fn rng(&mut self) -> &mut TestRng {
        &mut self.rng
    }

    /// Create a new, independent but deterministic RNG from the RNG in this
    /// runner.
    pub fn new_rng(&mut self) -> TestRng {
        self.rng.gen_rng()
    }

    /// Returns the configuration of this runner.
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// Dumps the bytes obtained from the RNG so far (only works if the RNG is
    /// set to `Recorder`).
    ///
    /// ## Panics
    ///
    /// Panics if the RNG does not capture generated data.
    pub fn bytes_used(&self) -> Vec<u8> {
        self.rng.bytes_used()
    }

    /// Run test cases against `f`, choosing inputs via `strategy`.
    ///
    /// If any failure cases occur, try to find a minimal failure case and
    /// report that. If invoking `f` panics, the panic is turned into a
    /// `TestCaseError::Fail`.
    ///
    /// If failure persistence is enabled, all persisted failing cases are
    /// tested first. If a later non-persisted case fails, its seed is
    /// persisted before returning failure.
    ///
    /// Returns success or failure indicating why the test as a whole failed.
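    ///
    /// ## Example
    ///
    /// A minimal sketch of driving the runner directly (illustrative; it
    /// mirrors the `test_pass` unit test at the bottom of this file):
    ///
    /// ```rust
    /// # use proptest::test_runner::*;
    /// let mut runner = TestRunner::default();
    /// let result = runner.run(&(0u32..100), |v| {
    ///     // The property under test: generated values stay in range.
    ///     assert!(v < 100);
    ///     Ok(())
    /// });
    /// assert_eq!(Ok(()), result);
    /// ```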
    pub fn run<S: Strategy>(
        &mut self,
        strategy: &S,
        test: impl Fn(S::Value) -> TestCaseResult,
    ) -> TestRunResult<S> {
        if self.config.fork() {
            self.run_in_fork(strategy, test)
        } else {
            self.run_in_process(strategy, test)
        }
    }

    #[cfg(not(feature = "fork"))]
    fn run_in_fork<S: Strategy>(
        &mut self,
        _: &S,
        _: impl Fn(S::Value) -> TestCaseResult,
    ) -> TestRunResult<S> {
        unreachable!()
    }

    #[cfg(feature = "fork")]
    fn run_in_fork<S: Strategy>(
        &mut self,
        strategy: &S,
        test: impl Fn(S::Value) -> TestCaseResult,
    ) -> TestRunResult<S> {
        let mut test = Some(test);

        let test_name = rusty_fork::fork_test::fix_module_path(
            self.config
                .test_name
                .expect("Must supply test_name when forking enabled"),
        );
        let forkfile: RefCell<Option<tempfile::NamedTempFile>> =
            RefCell::new(None);
        let init_forkfile_size = Cell::new(0u64);
        let seed = self.rng.new_rng_seed();
        let mut replay = replay::Replay {
            seed,
            steps: vec![],
        };
        let mut child_count = 0;
        let timeout = self.config.timeout();

        fn forkfile_size(forkfile: &Option<tempfile::NamedTempFile>) -> u64 {
            forkfile.as_ref().map_or(0, |ff| {
                ff.as_file().metadata().map(|md| md.len()).unwrap_or(0)
            })
        }

        loop {
            let (child_error, last_fork_file_len) = rusty_fork::fork(
                test_name,
                rusty_fork_id!(),
                |cmd| {
                    let mut forkfile = forkfile.borrow_mut();
                    if forkfile.is_none() {
                        *forkfile =
                            Some(tempfile::NamedTempFile::new().expect(
                                "Failed to create temporary file for fork",
                            ));
                        replay.init_file(forkfile.as_mut().unwrap()).expect(
                            "Failed to initialise temporary file for fork",
                        );
                    }

                    init_forkfile_size.set(forkfile_size(&forkfile));

                    cmd.env(ENV_FORK_FILE, forkfile.as_ref().unwrap().path());
                },
                |child, _| {
                    await_child(
                        child,
                        &mut forkfile.borrow_mut().as_mut().unwrap(),
                        timeout,
                    )
                },
                || match self.run_in_process(strategy, test.take().unwrap()) {
                    Ok(_) => (),
                    Err(e) => panic!(
                        "Test failed normally in child process.\n{}\n{}",
                        e, self
                    ),
                },
            )
            .expect("Fork failed");

            let parsed = replay::Replay::parse_from(
                &mut forkfile.borrow_mut().as_mut().unwrap(),
            )
            .expect("Failed to re-read fork file");
            match parsed {
                replay::ReplayFileStatus::InProgress(new_replay) => {
                    replay = new_replay
                }
                replay::ReplayFileStatus::Terminated(new_replay) => {
                    replay = new_replay;
                    break;
                }
                replay::ReplayFileStatus::Corrupt => {
                    panic!("Child process corrupted replay file")
                }
            }

            let curr_forkfile_size = forkfile_size(&forkfile.borrow());

            // If the child failed to append *anything* to the forkfile, it
            // crashed or timed out before starting even one test case, so
            // bail.
            if curr_forkfile_size == init_forkfile_size.get() {
                return Err(TestError::Abort(
                    "Child process crashed or timed out before the first test \
                     started running; giving up."
                        .into(),
                ));
            }

            // The child only terminates early if it outright crashes or we
            // kill it due to timeout, so add a synthetic failure to the
            // output. But only do this if the length of the fork file is the
            // same as when we last saw it, or if the child was not killed due
            // to timeout. (This is because the child could have appended
            // something to the file after we gave up waiting for it but before
            // we were able to kill it).
            if last_fork_file_len.map_or(true, |last_fork_file_len| {
                last_fork_file_len == curr_forkfile_size
            }) {
                let error = Err(child_error.unwrap_or(TestCaseError::fail(
                    "Child process was terminated abruptly \
                     but with successful status",
                )));
                replay::append(forkfile.borrow_mut().as_mut().unwrap(), &error)
                    .expect("Failed to append to replay file");
                replay.steps.push(error);
            }

            // Bail if we've gone through too many processes in case the
            // shrinking process itself is crashing.
            child_count += 1;
            if child_count >= 10000 {
                return Err(TestError::Abort(
                    "Giving up after 10000 child processes crashed".into(),
                ));
            }
        }

        // Run through the steps in-process (without ever running the actual
        // tests) to produce the shrunken value and update the persistence
        // file.
        self.rng.set_seed(replay.seed);
        self.run_in_process_with_replay(
            strategy,
            |_| panic!("Ran past the end of the replay"),
            replay.steps.into_iter(),
            ForkOutput::empty(),
        )
    }

    fn run_in_process<S: Strategy>(
        &mut self,
        strategy: &S,
        test: impl Fn(S::Value) -> TestCaseResult,
    ) -> TestRunResult<S> {
        let (replay_steps, fork_output) = init_replay(&mut self.rng);
        self.run_in_process_with_replay(
            strategy,
            test,
            replay_steps.into_iter(),
            fork_output,
        )
    }

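    // Core in-process driver: replays any persisted failure seeds first (these
    // do not count towards `cases`), then keeps generating fresh cases until
    // enough successes have accumulated, persisting the seed of any new
    // failure unless we are running as a fork child.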
    fn run_in_process_with_replay<S: Strategy>(
        &mut self,
        strategy: &S,
        test: impl Fn(S::Value) -> TestCaseResult,
        mut replay_from_fork: impl Iterator<Item = TestCaseResult>,
        mut fork_output: ForkOutput,
    ) -> TestRunResult<S> {
        let old_rng = self.rng.clone();

        let persisted_failure_seeds: Vec<PersistedSeed> = self
            .config
            .failure_persistence
            .as_ref()
            .map(|f| f.load_persisted_failures2(self.config.source_file))
            .unwrap_or_default();

        let mut result_cache = self.new_cache();

        for PersistedSeed(persisted_seed) in persisted_failure_seeds {
            self.rng.set_seed(persisted_seed);
            self.gen_and_run_case(
                strategy,
                &test,
                &mut replay_from_fork,
                &mut *result_cache,
                &mut fork_output,
                true,
            )?;
        }
        self.rng = old_rng;

        while self.successes < self.config.cases {
            // Generate a new seed and make an RNG from that so that we know
            // what seed to persist if this case fails.
            let seed = self.rng.gen_get_seed();
            let result = self.gen_and_run_case(
                strategy,
                &test,
                &mut replay_from_fork,
                &mut *result_cache,
                &mut fork_output,
                false,
            );
            if let Err(TestError::Fail(_, ref value)) = result {
                if let Some(ref mut failure_persistence) =
                    self.config.failure_persistence
                {
                    let source_file = &self.config.source_file;

                    // Don't update the persistence file if we're a child
                    // process. The parent relies on it remaining consistent
                    // and will take care of updating it itself.
                    if !fork_output.is_in_fork() {
                        failure_persistence.save_persisted_failure2(
                            *source_file,
                            PersistedSeed(seed),
                            value,
                        );
                    }
                }
            }

            if let Err(e) = result {
                fork_output.terminate();
                return Err(e.into());
            }
        }

        fork_output.terminate();
        Ok(())
    }

    fn gen_and_run_case<S: Strategy>(
        &mut self,
        strategy: &S,
        f: &impl Fn(S::Value) -> TestCaseResult,
        replay_from_fork: &mut impl Iterator<Item = TestCaseResult>,
        result_cache: &mut dyn ResultCache,
        fork_output: &mut ForkOutput,
        is_from_persisted_seed: bool,
    ) -> TestRunResult<S> {
        let case = unwrap_or!(strategy.new_tree(self), msg =>
                return Err(TestError::Abort(msg)));

        // We only count new cases to our set of successful runs against
        // `PROPTEST_CASES` config.
        let ok_type = self.run_one_with_replay(
            case,
            f,
            replay_from_fork,
            result_cache,
            fork_output,
            is_from_persisted_seed,
        )?;
        match ok_type {
            TestCaseOk::NewCaseSuccess | TestCaseOk::ReplayFromForkSuccess => {
                self.successes += 1
            }
            TestCaseOk::PersistedCaseSuccess
            | TestCaseOk::CacheHitSuccess
            | TestCaseOk::Reject => (),
        }

        Ok(())
    }

    /// Run one specific test case against this runner.
    ///
    /// If the test fails, finds the minimal failing test case. If the test
    /// does not fail, returns whether it succeeded or was filtered out.
    ///
    /// This does not honour the `fork` config, and will not be able to
    /// terminate the run if it runs for longer than `timeout`. However, if the
    /// test function returns but took longer than `timeout`, the test case
    /// will fail.
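    ///
    /// ## Example
    ///
    /// A minimal sketch (illustrative) of running a single generated value
    /// tree through a test closure:
    ///
    /// ```rust
    /// # use proptest::strategy::Strategy;
    /// # use proptest::test_runner::*;
    /// let mut runner = TestRunner::default();
    /// let tree = (0u32..10)
    ///     .new_tree(&mut runner)
    ///     .expect("failed to create value tree");
    /// let passed = runner
    ///     .run_one(tree, |v| {
    ///         assert!(v < 10);
    ///         Ok(())
    ///     })
    ///     .expect("test case failed");
    /// assert!(passed);
    /// ```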
    pub fn run_one<V: ValueTree>(
        &mut self,
        case: V,
        test: impl Fn(V::Value) -> TestCaseResult,
    ) -> Result<bool, TestError<V::Value>> {
        let mut result_cache = self.new_cache();
        self.run_one_with_replay(
            case,
            test,
            &mut iter::empty::<TestCaseResult>().fuse(),
            &mut *result_cache,
            &mut ForkOutput::empty(),
            false,
        )
        .map(|ok_type| match ok_type {
            TestCaseOk::Reject => false,
            _ => true,
        })
    }

    fn run_one_with_replay<V: ValueTree>(
        &mut self,
        mut case: V,
        test: impl Fn(V::Value) -> TestCaseResult,
        replay_from_fork: &mut impl Iterator<Item = TestCaseResult>,
        result_cache: &mut dyn ResultCache,
        fork_output: &mut ForkOutput,
        is_from_persisted_seed: bool,
    ) -> Result<TestCaseOk, TestError<V::Value>> {
        let result = call_test(
            self,
            case.current(),
            &test,
            replay_from_fork,
            result_cache,
            fork_output,
            is_from_persisted_seed,
        );

        match result {
            Ok(success_type) => Ok(success_type),
            Err(TestCaseError::Fail(why)) => {
                let why = self
                    .shrink(
                        &mut case,
                        test,
                        replay_from_fork,
                        result_cache,
                        fork_output,
                        is_from_persisted_seed,
                    )
                    .unwrap_or(why);
                Err(TestError::Fail(why, case.current()))
            }
            Err(TestCaseError::Reject(whence)) => {
                self.reject_global(whence)?;
                Ok(TestCaseOk::Reject)
            }
        }
    }

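    // Repeatedly simplify the failing case, backing off with `complicate()`
    // whenever a simplification no longer fails, until no further progress can
    // be made or the configured iteration/time limits are hit. Returns the
    // failure reason of the last failing shrink, if any.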
    fn shrink<V: ValueTree>(
        &mut self,
        case: &mut V,
        test: impl Fn(V::Value) -> TestCaseResult,
        replay_from_fork: &mut impl Iterator<Item = TestCaseResult>,
        result_cache: &mut dyn ResultCache,
        fork_output: &mut ForkOutput,
        is_from_persisted_seed: bool,
    ) -> Option<Reason> {
        #[cfg(feature = "std")]
        use std::time;

        let mut last_failure = None;
        let mut iterations = 0;
        #[cfg(feature = "std")]
        let start_time = time::Instant::now();

        if case.simplify() {
            loop {
                #[cfg(feature = "std")]
                let timed_out = if self.config.max_shrink_time > 0 {
                    let elapsed = start_time.elapsed();
                    let elapsed_ms = elapsed
                        .as_secs()
                        .saturating_mul(1000)
                        .saturating_add(elapsed.subsec_millis().into());
                    if elapsed_ms > self.config.max_shrink_time as u64 {
                        Some(elapsed_ms)
                    } else {
                        None
                    }
                } else {
                    None
                };
                #[cfg(not(feature = "std"))]
                let timed_out: Option<u64> = None;

                let bail = if iterations >= self.config.max_shrink_iters() {
                    #[cfg(feature = "std")]
                    const CONTROLLER: &str =
                        "the PROPTEST_MAX_SHRINK_ITERS environment \
                         variable or ProptestConfig.max_shrink_iters";
                    #[cfg(not(feature = "std"))]
                    const CONTROLLER: &str = "ProptestConfig.max_shrink_iters";
                    verbose_message!(
                        self,
                        ALWAYS,
                        "Aborting shrinking after {} iterations (set {} \
                         to a large(r) value to shrink more; current \
                         configuration: {} iterations)",
                        iterations,
                        CONTROLLER,
                        self.config.max_shrink_iters()
                    );
                    true
                } else if let Some(ms) = timed_out {
                    #[cfg(feature = "std")]
                    const CONTROLLER: &str =
                        "the PROPTEST_MAX_SHRINK_TIME environment \
                         variable or ProptestConfig.max_shrink_time";
                    #[cfg(feature = "std")]
                    let current = self.config.max_shrink_time;
                    #[cfg(not(feature = "std"))]
                    const CONTROLLER: &str = "(not configurable in no_std)";
                    #[cfg(not(feature = "std"))]
                    let current = 0;
                    verbose_message!(
                        self,
                        ALWAYS,
                        "Aborting shrinking after taking too long: {} ms \
                         (set {} to a large(r) value to shrink more; current \
                         configuration: {} ms)",
                        ms,
                        CONTROLLER,
                        current
                    );
                    true
                } else {
                    false
                };

                if bail {
                    // Move back to the most recent failing case
                    while case.complicate() {
                        fork_output.append(&Ok(()));
                    }
                    break;
                }

                iterations += 1;

                let result = call_test(
                    self,
                    case.current(),
                    &test,
                    replay_from_fork,
                    result_cache,
                    fork_output,
                    is_from_persisted_seed,
                );

                match result {
                    // Rejections are effectively a pass here,
                    // since they indicate that any behaviour of
                    // the function under test is acceptable.
                    Ok(_) | Err(TestCaseError::Reject(..)) => {
                        if !case.complicate() {
                            break;
                        }
                    }
                    Err(TestCaseError::Fail(why)) => {
                        last_failure = Some(why);
                        if !case.simplify() {
                            break;
                        }
                    }
                }
            }
        }

        last_failure
    }

    /// Update the state to account for a local rejection from `whence`, and
    /// return `Ok` if the caller should keep going or `Err` to abort.
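    ///
    /// ## Example
    ///
    /// A minimal sketch (illustrative) of how a strategy or filter might
    /// report a value it had to discard:
    ///
    /// ```rust
    /// # use proptest::test_runner::*;
    /// let mut runner = TestRunner::default();
    /// // Returns `Ok` until the configured `max_local_rejects` is exceeded.
    /// assert!(runner.reject_local("value did not satisfy the filter").is_ok());
    /// ```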
    pub fn reject_local(
        &mut self,
        whence: impl Into<Reason>,
    ) -> Result<(), Reason> {
        if self.local_rejects >= self.config.max_local_rejects {
            Err("Too many local rejects".into())
        } else {
            self.local_rejects += 1;
            Self::insert_or_increment(
                &mut self.local_reject_detail,
                whence.into(),
            );
            Ok(())
        }
    }

    /// Update the state to account for a global rejection from `whence`, and
    /// return `Ok` if the caller should keep going or `Err` to abort.
    fn reject_global<T>(&mut self, whence: Reason) -> Result<(), TestError<T>> {
        if self.global_rejects >= self.config.max_global_rejects {
            Err(TestError::Abort("Too many global rejects".into()))
        } else {
            self.global_rejects += 1;
            Self::insert_or_increment(&mut self.global_reject_detail, whence);
            Ok(())
        }
    }

    /// Insert a count of 1 for `whence`, or increment the existing count, in
    /// the given rejection detail map.
    fn insert_or_increment(into: &mut RejectionDetail, whence: Reason) {
        into.entry(whence)
            .and_modify(|count| *count += 1)
            .or_insert(1);
    }

    /// Increment the counter of flat map regenerations and return whether it
    /// is still under the configured limit.
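    ///
    /// ## Example
    ///
    /// A minimal sketch (illustrative); with the default configuration the
    /// first regeneration is always permitted:
    ///
    /// ```rust
    /// # use proptest::test_runner::*;
    /// let runner = TestRunner::default();
    /// assert!(runner.flat_map_regen());
    /// ```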
    pub fn flat_map_regen(&self) -> bool {
        self.flat_map_regens.fetch_add(1, SeqCst)
            < self.config.max_flat_map_regens as usize
    }

    fn new_cache(&self) -> Box<dyn ResultCache> {
        (self.config.result_cache)()
    }
}

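// When running as a fork child (detected via `ENV_FORK_FILE`), load the replay
// file written by the parent: reseed the RNG from it and return the previously
// recorded results plus a handle for appending new ones. Outside of a fork,
// return an empty replay.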
#[cfg(feature = "fork")]
fn init_replay(rng: &mut TestRng) -> (Vec<TestCaseResult>, ForkOutput) {
    use crate::test_runner::replay::{open_file, Replay, ReplayFileStatus::*};

    if let Some(path) = env::var_os(ENV_FORK_FILE) {
        let mut file = open_file(&path).expect("Failed to open replay file");
        let loaded =
            Replay::parse_from(&mut file).expect("Failed to read replay file");
        match loaded {
            InProgress(replay) => {
                rng.set_seed(replay.seed);
                (replay.steps, ForkOutput { file: Some(file) })
            }

            Terminated(_) => {
                panic!("Replay file for child process is terminated?")
            }

            Corrupt => panic!("Replay file for child process is corrupt"),
        }
    } else {
        (vec![], ForkOutput::empty())
    }
}

#[cfg(not(feature = "fork"))]
fn init_replay(
    _rng: &mut TestRng,
) -> (iter::Empty<TestCaseResult>, ForkOutput) {
    (iter::empty(), ForkOutput::empty())
}

#[cfg(feature = "fork")]
fn await_child_without_timeout(
    child: &mut rusty_fork::ChildWrapper,
) -> (Option<TestCaseError>, Option<u64>) {
    let status = child.wait().expect("Failed to wait for child process");

    if status.success() {
        (None, None)
    } else {
        (
            Some(TestCaseError::fail(format!(
                "Child process exited with {}",
                status
            ))),
            None,
        )
    }
}

#[cfg(all(feature = "fork", not(feature = "timeout")))]
fn await_child(
    child: &mut rusty_fork::ChildWrapper,
    _: &mut tempfile::NamedTempFile,
    _timeout: u32,
) -> (Option<TestCaseError>, Option<u64>) {
    await_child_without_timeout(child)
}

#[cfg(all(feature = "fork", feature = "timeout"))]
fn await_child(
    child: &mut rusty_fork::ChildWrapper,
    forkfile: &mut tempfile::NamedTempFile,
    timeout: u32,
) -> (Option<TestCaseError>, Option<u64>) {
    use std::time::Duration;

    if 0 == timeout {
        return await_child_without_timeout(child);
    }

    // The child can run for longer than the timeout since it may run
    // multiple tests. Each time the timeout expires, we check whether the
    // file has grown larger. If it has, we allow the child to keep running
    // until the next timeout.
    let mut last_forkfile_len = forkfile
        .as_file()
        .metadata()
        .map(|md| md.len())
        .unwrap_or(0);

    loop {
        if let Some(status) = child
            .wait_timeout(Duration::from_millis(timeout.into()))
            .expect("Failed to wait for child process")
        {
            if status.success() {
                return (None, None);
            } else {
                return (
                    Some(TestCaseError::fail(format!(
                        "Child process exited with {}",
                        status
                    ))),
                    None,
                );
            }
        }

        let current_len = forkfile
            .as_file()
            .metadata()
            .map(|md| md.len())
            .unwrap_or(0);
        // If we've gone a full timeout period without the file growing,
        // fail the test and kill the child.
        if current_len <= last_forkfile_len {
            return (
                Some(TestCaseError::fail(
                    "Timed out waiting for child process",
                )),
                Some(current_len),
            );
        } else {
            last_forkfile_len = current_len;
        }
    }
}

#[cfg(test)]
mod test {
    use std::cell::{Cell, RefCell};
    use std::fs;

    use super::*;
    use crate::strategy::Strategy;
    use crate::test_runner::{FileFailurePersistence, RngAlgorithm, TestRng};

    #[test]
    fn gives_up_after_too_many_rejections() {
        let config = Config::default();
        let mut runner = TestRunner::new(config.clone());
        let runs = Cell::new(0);
        let result = runner.run(&(0u32..), |_| {
            runs.set(runs.get() + 1);
            Err(TestCaseError::reject("reject"))
        });
        match result {
            Err(TestError::Abort(_)) => (),
            e => panic!("Unexpected result: {:?}", e),
        }
        assert_eq!(config.max_global_rejects + 1, runs.get());
    }

    #[test]
    fn test_pass() {
        let mut runner = TestRunner::default();
        let result = runner.run(&(1u32..), |v| {
            assert!(v > 0);
            Ok(())
        });
        assert_eq!(Ok(()), result);
    }

    #[test]
    fn test_fail_via_result() {
        let mut runner = TestRunner::new(Config {
            failure_persistence: None,
            ..Config::default()
        });
        let result = runner.run(&(0u32..10u32), |v| {
            if v < 5 {
                Ok(())
            } else {
                Err(TestCaseError::fail("not less than 5"))
            }
        });

        assert_eq!(Err(TestError::Fail("not less than 5".into(), 5)), result);
    }

    #[test]
    fn test_fail_via_panic() {
        let mut runner = TestRunner::new(Config {
            failure_persistence: None,
            ..Config::default()
        });
        let result = runner.run(&(0u32..10u32), |v| {
            assert!(v < 5, "not less than 5");
            Ok(())
        });
        assert_eq!(Err(TestError::Fail("not less than 5".into(), 5)), result);
    }

    #[test]
    fn persisted_cases_do_not_count_towards_total_cases() {
        const FILE: &'static str = "persistence-test.txt";
        let _ = fs::remove_file(FILE);

        let config = Config {
            failure_persistence: Some(Box::new(
                FileFailurePersistence::Direct(FILE),
            )),
            cases: 1,
            ..Config::default()
        };

        let max = 10_000_000i32;
        {
            TestRunner::new(config.clone())
                .run(&(0i32..max), |_v| {
                    Err(TestCaseError::Fail("persist a failure".into()))
                })
                .expect_err("didn't fail?");
        }

        let run_count = RefCell::new(0);
        TestRunner::new(config.clone())
            .run(&(0i32..max), |_v| {
                *run_count.borrow_mut() += 1;
                Ok(())
            })
            .expect("should succeed");

        // Persisted ran, and a new case ran, and only new case counts
        // against `cases: 1`.
        assert_eq!(run_count.into_inner(), 2);
    }

    #[derive(Clone, Copy, PartialEq)]
    struct PoorlyBehavedDebug(i32);
    impl fmt::Debug for PoorlyBehavedDebug {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "\r\n{:?}\r\n", self.0)
        }
    }

    #[test]
    fn failing_cases_persisted_and_reloaded() {
        const FILE: &'static str = "persistence-test.txt";
        let _ = fs::remove_file(FILE);

        let max = 10_000_000i32;
        let input = (0i32..max).prop_map(PoorlyBehavedDebug);
        let config = Config {
            failure_persistence: Some(Box::new(
                FileFailurePersistence::Direct(FILE),
            )),
            ..Config::default()
        };

        // First test with cases that fail above half max, and then below half
        // max, to ensure we can correctly parse both lines of the persistence
        // file.
        let first_sub_failure = {
            TestRunner::new(config.clone())
                .run(&input, |v| {
                    if v.0 < max / 2 {
                        Ok(())
                    } else {
                        Err(TestCaseError::Fail("too big".into()))
                    }
                })
                .expect_err("didn't fail?")
        };
        let first_super_failure = {
            TestRunner::new(config.clone())
                .run(&input, |v| {
                    if v.0 >= max / 2 {
                        Ok(())
                    } else {
                        Err(TestCaseError::Fail("too small".into()))
                    }
                })
                .expect_err("didn't fail?")
        };
        let second_sub_failure = {
            TestRunner::new(config.clone())
                .run(&input, |v| {
                    if v.0 < max / 2 {
                        Ok(())
                    } else {
                        Err(TestCaseError::Fail("too big".into()))
                    }
                })
                .expect_err("didn't fail?")
        };
        let second_super_failure = {
            TestRunner::new(config.clone())
                .run(&input, |v| {
                    if v.0 >= max / 2 {
                        Ok(())
                    } else {
                        Err(TestCaseError::Fail("too small".into()))
                    }
                })
                .expect_err("didn't fail?")
        };

        assert_eq!(first_sub_failure, second_sub_failure);
        assert_eq!(first_super_failure, second_super_failure);
    }

    #[test]
    fn new_rng_makes_separate_rng() {
        use rand::Rng;
        let mut runner = TestRunner::default();
        let from_1 = runner.new_rng().gen::<[u8; 16]>();
        let from_2 = runner.rng().gen::<[u8; 16]>();
        assert_ne!(from_1, from_2);
    }

    #[test]
    fn record_rng_use() {
        use rand::Rng;

        // create value with recorder rng
        let default_config = Config::default();
        let recorder_rng = TestRng::default_rng(RngAlgorithm::Recorder);
        let mut runner =
            TestRunner::new_with_rng(default_config.clone(), recorder_rng);
        let random_byte_array1 = runner.rng().gen::<[u8; 16]>();
        let bytes_used = runner.bytes_used();
        assert!(bytes_used.len() >= 16); // could use more bytes for some reason

        // re-create value with pass-through rng
        let passthrough_rng =
            TestRng::from_seed(RngAlgorithm::PassThrough, &bytes_used);
        let mut runner =
            TestRunner::new_with_rng(default_config, passthrough_rng);
        let random_byte_array2 = runner.rng().gen::<[u8; 16]>();

        // make sure the same value was created
        assert_eq!(random_byte_array1, random_byte_array2);
    }

    #[cfg(feature = "fork")]
    #[test]
    fn run_successful_test_in_fork() {
        let mut runner = TestRunner::new(Config {
            fork: true,
            test_name: Some(concat!(
                module_path!(),
                "::run_successful_test_in_fork"
            )),
            ..Config::default()
        });

        assert!(runner.run(&(0u32..1000), |_| Ok(())).is_ok());
    }

    #[cfg(feature = "fork")]
    #[test]
    fn normal_failure_in_fork_results_in_correct_failure() {
        let mut runner = TestRunner::new(Config {
            fork: true,
            test_name: Some(concat!(
                module_path!(),
                "::normal_failure_in_fork_results_in_correct_failure"
            )),
            ..Config::default()
        });

        let failure = runner
            .run(&(0u32..1000), |v| {
                prop_assert!(v < 500);
                Ok(())
            })
            .err()
            .unwrap();

        match failure {
            TestError::Fail(_, value) => assert_eq!(500, value),
            failure => panic!("Unexpected failure: {:?}", failure),
        }
    }

    #[cfg(feature = "fork")]
    #[test]
    fn nonsuccessful_exit_finds_correct_failure() {
        let mut runner = TestRunner::new(Config {
            fork: true,
            test_name: Some(concat!(
                module_path!(),
                "::nonsuccessful_exit_finds_correct_failure"
            )),
            ..Config::default()
        });

        let failure = runner
            .run(&(0u32..1000), |v| {
                if v >= 500 {
                    ::std::process::exit(1);
                }
                Ok(())
            })
            .err()
            .unwrap();

        match failure {
            TestError::Fail(_, value) => assert_eq!(500, value),
            failure => panic!("Unexpected failure: {:?}", failure),
        }
    }

    #[cfg(feature = "fork")]
    #[test]
    fn spurious_exit_finds_correct_failure() {
        let mut runner = TestRunner::new(Config {
            fork: true,
            test_name: Some(concat!(
                module_path!(),
                "::spurious_exit_finds_correct_failure"
            )),
            ..Config::default()
        });

        let failure = runner
            .run(&(0u32..1000), |v| {
                if v >= 500 {
                    ::std::process::exit(0);
                }
                Ok(())
            })
            .err()
            .unwrap();

        match failure {
            TestError::Fail(_, value) => assert_eq!(500, value),
            failure => panic!("Unexpected failure: {:?}", failure),
        }
    }

    #[cfg(feature = "timeout")]
    #[test]
    fn long_sleep_timeout_finds_correct_failure() {
        let mut runner = TestRunner::new(Config {
            fork: true,
            timeout: 500,
            test_name: Some(concat!(
                module_path!(),
                "::long_sleep_timeout_finds_correct_failure"
            )),
            ..Config::default()
        });

        let failure = runner
            .run(&(0u32..1000), |v| {
                if v >= 500 {
                    ::std::thread::sleep(::std::time::Duration::from_millis(
                        10_000,
                    ));
                }
                Ok(())
            })
            .err()
            .unwrap();

        match failure {
            TestError::Fail(_, value) => assert_eq!(500, value),
            failure => panic!("Unexpected failure: {:?}", failure),
        }
    }

    #[cfg(feature = "timeout")]
    #[test]
    fn mid_sleep_timeout_finds_correct_failure() {
        let mut runner = TestRunner::new(Config {
            fork: true,
            timeout: 500,
            test_name: Some(concat!(
                module_path!(),
                "::mid_sleep_timeout_finds_correct_failure"
            )),
            ..Config::default()
        });

        let failure = runner
            .run(&(0u32..1000), |v| {
                if v >= 500 {
                    // Sleep a little longer than the timeout. This means that
                    // sometimes the test case itself will return before the parent
                    // process has noticed the child is timing out, so it's up to
                    // the child to mark it as a failure.
                    ::std::thread::sleep(::std::time::Duration::from_millis(
                        600,
                    ));
                } else {
                    // Sleep a bit so that the parent and child timing don't stay
                    // in sync.
                    ::std::thread::sleep(::std::time::Duration::from_millis(
                        100,
                    ))
                }
                Ok(())
            })
            .err()
            .unwrap();

        match failure {
            TestError::Fail(_, value) => assert_eq!(500, value),
            failure => panic!("Unexpected failure: {:?}", failure),
        }
    }

    #[cfg(feature = "std")]
    #[test]
    fn duplicate_tests_not_run_with_basic_result_cache() {
        use std::cell::{Cell, RefCell};
        use std::collections::HashSet;
        use std::rc::Rc;

        for _ in 0..256 {
            let mut runner = TestRunner::new(Config {
                failure_persistence: None,
                result_cache:
                    crate::test_runner::result_cache::basic_result_cache,
                ..Config::default()
            });
            let pass = Rc::new(Cell::new(true));
            let seen = Rc::new(RefCell::new(HashSet::new()));
            let result =
                runner.run(&(0u32..65536u32).prop_map(|v| v % 10), |val| {
                    if !seen.borrow_mut().insert(val) {
                        println!("Value {} seen more than once", val);
                        pass.set(false);
                    }

                    prop_assert!(val <= 5);
                    Ok(())
                });

            assert!(pass.get());
            if let Err(TestError::Fail(_, val)) = result {
                assert_eq!(6, val);
            } else {
                panic!("Incorrect result: {:?}", result);
            }
        }
    }
}

#[cfg(all(feature = "fork", feature = "timeout", test))]
mod timeout_tests {
    use core::u32;
    use std::thread;
    use std::time::Duration;

    use super::*;

    rusty_fork_test! {
        #![rusty_fork(timeout_ms = 4_000)]

        #[test]
        fn max_shrink_iters_works() {
            test_shrink_bail(Config {
                max_shrink_iters: 5,
                .. Config::default()
            });
        }

        #[test]
        fn max_shrink_time_works() {
            test_shrink_bail(Config {
                max_shrink_time: 1000,
                .. Config::default()
            });
        }

        #[test]
        fn max_shrink_iters_works_with_forking() {
            test_shrink_bail(Config {
                fork: true,
                test_name: Some(
                    concat!(module_path!(),
                            "::max_shrink_iters_works_with_forking")),
                max_shrink_time: 1000,
                .. Config::default()
            });
        }

        #[test]
        fn detects_child_failure_to_start() {
            let mut runner = TestRunner::new(Config {
                timeout: 100,
                test_name: Some(
                    concat!(module_path!(),
                            "::detects_child_failure_to_start")),
                .. Config::default()
            });
            let result = runner.run(&Just(()).prop_map(|()| {
                thread::sleep(Duration::from_millis(200))
            }), Ok);

            if let Err(TestError::Abort(_)) = result {
                // OK
            } else {
                panic!("Unexpected result: {:?}", result);
            }
        }
    }

    fn test_shrink_bail(config: Config) {
        let mut runner = TestRunner::new(config);
        let result = runner.run(&crate::num::u64::ANY, |v| {
            thread::sleep(Duration::from_millis(250));
            prop_assert!(v <= u32::MAX as u64);
            Ok(())
        });

        if let Err(TestError::Fail(_, value)) = result {
            // Ensure the final value was in fact a failing case.
            assert!(value > u32::MAX as u64);
        } else {
            panic!("Unexpected result: {:?}", result);
        }
    }
}