1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
// SPDX-License-Identifier: MPL-2.0

//! Test runner enabling control over the tests.
//!

use alloc::{collections::BTreeSet, string::String, vec::Vec};
use core::format_args;

use owo_colors::OwoColorize;

use crate::{
    path::{KtestPath, SuffixTrie},
    tree::{KtestCrate, KtestTree},
    CatchUnwindImpl, KtestError, KtestItem, KtestIter,
};

/// The overall outcome of a ktest run, interpreted as `ok` or `FAILED`.
// Standard derives so callers can debug-print, copy, and compare outcomes
// instead of matching on variants by hand.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum KtestResult {
    /// Every test that ran passed.
    Ok,
    /// At least one test failed (or a crate's run was aborted by a failure).
    Failed,
}

/// Runs the tests registered by `#[ktest]` in the `.ktest_array` section.
///
/// The caller supplies a `print` function for emitting progress and a
/// `catch_unwind` implementation for intercepting test panics.
///
/// `test_whitelist` is optional: `None` runs every compiled test, while
/// `Some` runs only the tests whose path is a suffix of some path in the
/// whitelist. `crate_whitelist` similarly restricts which crates are tested.
///
/// Returns the result interpreted as `ok` or `FAILED`.
///
/// A failing test does not stop the remaining tests of its own crate, but
/// it does prevent any subsequent crates from being tested.
pub fn run_ktests<PrintFn, PathsIter>(
    print: &PrintFn,
    catch_unwind: &CatchUnwindImpl,
    test_whitelist: Option<PathsIter>,
    crate_whitelist: Option<&[&str]>,
) -> KtestResult
where
    PrintFn: Fn(core::fmt::Arguments),
    PathsIter: Iterator<Item = String>,
{
    macro_rules! print {
        ($fmt: literal $(, $($arg: tt)+)?) => {
            print(format_args!($fmt $(, $($arg)+)?))
        }
    }

    // A suffix trie over the whitelisted paths; `None` means "run everything".
    let whitelist_trie = test_whitelist
        .map(|paths| SuffixTrie::from_paths(paths.map(|p| KtestPath::from(&p))));

    let tree = KtestTree::from_iter(KtestIter::new());
    print!(
        "\n[ktest runner] running {} tests in {} crates\n",
        tree.nr_tot_tests(),
        tree.nr_tot_crates()
    );

    // Crate names allowed to run; `None` means all crates are allowed.
    let crate_set: Option<BTreeSet<&str>> =
        crate_whitelist.map(|names| names.iter().copied().collect());

    for crate_ in tree.iter() {
        let skipped = crate_set
            .as_ref()
            .map_or(false, |set| !set.contains(crate_.name()));
        if skipped {
            print!("\n[ktest runner] skipping crate \"{}\".\n", crate_.name());
            continue;
        }
        // A failure inside a crate aborts the run before any later crate.
        if let KtestResult::Failed =
            run_crate_ktests(crate_, print, catch_unwind, &whitelist_trie)
        {
            return KtestResult::Failed;
        }
    }

    print!("\n[ktest runner] All crates tested.\n");
    KtestResult::Ok
}

/// Runs all the tests of a single crate, printing per-test status lines and a
/// summary, followed by a detailed report for every failure.
///
/// Tests whose path is not matched by `whitelist` (when `Some`) are counted
/// as filtered out rather than run. Returns `KtestResult::Failed` if any test
/// in the crate failed, `KtestResult::Ok` otherwise.
fn run_crate_ktests<PrintFn>(
    crate_: &KtestCrate,
    print: &PrintFn,
    catch_unwind: &CatchUnwindImpl,
    whitelist: &Option<SuffixTrie>,
) -> KtestResult
where
    PrintFn: Fn(core::fmt::Arguments),
{
    macro_rules! print {
        ($fmt: literal $(, $($arg: tt)+)?) => {
            print(format_args!($fmt $(, $($arg)+)?))
        }
    }

    let crate_name = crate_.name();
    print!(
        "\nrunning {} tests in crate \"{}\"\n\n",
        crate_.nr_tot_tests(),
        crate_name
    );

    // Tallies of the three possible outcomes; failures also keep the test
    // item and its error so the report below can describe each one.
    let mut nr_passed: usize = 0;
    let mut nr_filtered: usize = 0;
    let mut failures: Vec<(KtestItem, KtestError)> = Vec::new();

    for module in crate_.iter() {
        for test in module.iter() {
            // Filter out tests whose `module_path::fn_name` is not a suffix
            // of any whitelisted path.
            if let Some(trie) = whitelist {
                let mut path = KtestPath::from(test.info().module_path);
                path.push_back(test.info().fn_name);
                if !trie.contains(path.iter()) {
                    nr_filtered += 1;
                    continue;
                }
            }
            print!(
                "test {}::{} ...",
                test.info().module_path,
                test.info().fn_name
            );
            // Every test in this crate's subtree should report this crate as
            // its package; a mismatch would mean the tree was built wrong.
            debug_assert_eq!(test.info().package, crate_name);
            match test.run(catch_unwind) {
                Ok(()) => {
                    print!(" {}\n", "ok".green());
                    nr_passed += 1;
                }
                Err(e) => {
                    print!(" {}\n", "FAILED".red());
                    failures.push((test.clone(), e.clone()));
                }
            }
        }
    }

    let nr_failed = failures.len();
    let all_passed = nr_failed == 0;
    if all_passed {
        print!("\ntest result: {}.", "ok".green());
    } else {
        print!("\ntest result: {}.", "FAILED".red());
    }
    print!(
        " {} passed; {} failed; {} filtered out.\n",
        nr_passed, nr_failed, nr_filtered
    );
    // Sanity check: every test was either run (pass/fail) or filtered.
    assert!(nr_passed + nr_failed + nr_filtered == crate_.nr_tot_tests());

    if all_passed {
        return KtestResult::Ok;
    }

    // Detailed per-failure report: source location, then the panic payload
    // (or a description of the expected-panic mismatch).
    print!("\nfailures:\n\n");
    for (item, err) in failures {
        print!(
            "---- {}:{}:{} - {} ----\n\n",
            item.info().source,
            item.info().line,
            item.info().col,
            item.info().fn_name
        );
        match err {
            KtestError::Panic(s) => {
                print!("[caught panic] {}\n", s);
            }
            KtestError::ShouldPanicButNoPanic => {
                print!("test did not panic as expected\n");
            }
            KtestError::ExpectedPanicNotMatch(expected, s) => {
                print!("[caught panic] expected panic not match\n");
                print!("expected: {}\n", expected);
                print!("caught: {}\n", s);
            }
            KtestError::Unknown => {
                print!("[caught panic] unknown panic payload! (fatal panic handling error in ktest)\n");
            }
        }
    }
    KtestResult::Failed
}