1 //! Module providing interface for running tests in the console.
2 
3 use std::fs::File;
4 use std::io;
5 use std::io::prelude::Write;
6 use std::time::Instant;
7 
8 use super::{
9     bench::fmt_bench_samples,
10     cli::TestOpts,
11     event::{CompletedTest, TestEvent},
12     filter_tests,
13     formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter},
14     helpers::{concurrency::get_concurrency, metrics::MetricMap},
15     options::{Options, OutputFormat},
16     run_tests,
17     test_result::TestResult,
18     time::{TestExecTime, TestSuiteExecTime},
19     types::{NamePadding, TestDesc, TestDescAndFn},
20 };
21 
/// Generic wrapper over stdout.
pub enum OutputLocation<T> {
    /// A terminal handle from the `term` crate, capable of color/formatting.
    Pretty(Box<term::StdoutTerminal>),
    /// A plain writer; used when `term::stdout()` yields no terminal handle.
    Raw(T),
}
27 
28 impl<T: Write> Write for OutputLocation<T> {
write(&mut self, buf: &[u8]) -> io::Result<usize>29     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
30         match *self {
31             OutputLocation::Pretty(ref mut term) => term.write(buf),
32             OutputLocation::Raw(ref mut stdout) => stdout.write(buf),
33         }
34     }
35 
flush(&mut self) -> io::Result<()>36     fn flush(&mut self) -> io::Result<()> {
37         match *self {
38             OutputLocation::Pretty(ref mut term) => term.flush(),
39             OutputLocation::Raw(ref mut stdout) => stdout.flush(),
40         }
41     }
42 }
43 
/// Mutable bookkeeping for a console test run: outcome counters, captured
/// output of notable tests, and the optional logfile sink.
pub struct ConsoleTestState {
    // Logfile opened from `opts.logfile`, if one was requested.
    pub log_out: Option<File>,
    // Number of tests selected to run (set from the `TeFiltered` event).
    pub total: usize,
    // Count of tests that passed.
    pub passed: usize,
    // Count of tests that failed (including failed-with-message and timed-out).
    pub failed: usize,
    // Count of tests skipped as ignored.
    pub ignored: usize,
    // Count of tests that failed but were allowed to fail.
    pub allowed_fail: usize,
    // Number of tests excluded by the filters (set from `TeFilteredOut`).
    pub filtered_out: usize,
    // Count of benchmarks that produced a measurement.
    pub measured: usize,
    // Wall-clock time of the whole suite, when `Instant` is usable on the target.
    pub exec_time: Option<TestSuiteExecTime>,
    // Benchmark metrics (median / deviation) keyed by test name.
    pub metrics: MetricMap,
    // (descriptor, captured stdout) for each failed test.
    pub failures: Vec<(TestDesc, Vec<u8>)>,
    // (descriptor, captured stdout) for each passing test.
    pub not_failures: Vec<(TestDesc, Vec<u8>)>,
    // (descriptor, captured stdout) for tests that exceeded the time limit.
    pub time_failures: Vec<(TestDesc, Vec<u8>)>,
    // Options copied out of the `TestOpts` this state was built from.
    pub options: Options,
}
60 
61 impl ConsoleTestState {
new(opts: &TestOpts) -> io::Result<ConsoleTestState>62     pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
63         let log_out = match opts.logfile {
64             Some(ref path) => Some(File::create(path)?),
65             None => None,
66         };
67 
68         Ok(ConsoleTestState {
69             log_out,
70             total: 0,
71             passed: 0,
72             failed: 0,
73             ignored: 0,
74             allowed_fail: 0,
75             filtered_out: 0,
76             measured: 0,
77             exec_time: None,
78             metrics: MetricMap::new(),
79             failures: Vec::new(),
80             not_failures: Vec::new(),
81             time_failures: Vec::new(),
82             options: opts.options,
83         })
84     }
85 
write_log<F, S>(&mut self, msg: F) -> io::Result<()> where S: AsRef<str>, F: FnOnce() -> S,86     pub fn write_log<F, S>(&mut self, msg: F) -> io::Result<()>
87     where
88         S: AsRef<str>,
89         F: FnOnce() -> S,
90     {
91         match self.log_out {
92             None => Ok(()),
93             Some(ref mut o) => {
94                 let msg = msg();
95                 let msg = msg.as_ref();
96                 o.write_all(msg.as_bytes())
97             }
98         }
99     }
100 
write_log_result( &mut self, test: &TestDesc, result: &TestResult, exec_time: Option<&TestExecTime>, ) -> io::Result<()>101     pub fn write_log_result(
102         &mut self,
103         test: &TestDesc,
104         result: &TestResult,
105         exec_time: Option<&TestExecTime>,
106     ) -> io::Result<()> {
107         self.write_log(|| {
108             format!(
109                 "{} {}",
110                 match *result {
111                     TestResult::TrOk => "ok".to_owned(),
112                     TestResult::TrFailed => "failed".to_owned(),
113                     TestResult::TrFailedMsg(ref msg) => format!("failed: {}", msg),
114                     TestResult::TrIgnored => "ignored".to_owned(),
115                     TestResult::TrAllowedFail => "failed (allowed)".to_owned(),
116                     TestResult::TrBench(ref bs) => fmt_bench_samples(bs),
117                     TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(),
118                 },
119                 test.name,
120             )
121         })?;
122         if let Some(exec_time) = exec_time {
123             self.write_log(|| format!(" <{}>", exec_time))?;
124         }
125         self.write_log(|| "\n")
126     }
127 
current_test_count(&self) -> usize128     fn current_test_count(&self) -> usize {
129         self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
130     }
131 }
132 
133 // List the tests to console, and optionally to logfile. Filters are honored.
list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()>134 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
135     let mut output = match term::stdout() {
136         None => OutputLocation::Raw(io::stdout()),
137         Some(t) => OutputLocation::Pretty(t),
138     };
139 
140     let quiet = opts.format == OutputFormat::Terse;
141     let mut st = ConsoleTestState::new(opts)?;
142 
143     let mut ntest = 0;
144     let mut nbench = 0;
145 
146     for test in filter_tests(&opts, tests) {
147         use crate::TestFn::*;
148 
149         let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
150 
151         let fntype = match testfn {
152             StaticTestFn(..) | DynTestFn(..) => {
153                 ntest += 1;
154                 "test"
155             }
156             StaticBenchFn(..) | DynBenchFn(..) => {
157                 nbench += 1;
158                 "benchmark"
159             }
160         };
161 
162         writeln!(output, "{}: {}", name, fntype)?;
163         st.write_log(|| format!("{} {}\n", fntype, name))?;
164     }
165 
166     fn plural(count: u32, s: &str) -> String {
167         match count {
168             1 => format!("{} {}", 1, s),
169             n => format!("{} {}s", n, s),
170         }
171     }
172 
173     if !quiet {
174         if ntest != 0 || nbench != 0 {
175             writeln!(output)?;
176         }
177 
178         writeln!(output, "{}, {}", plural(ntest, "test"), plural(nbench, "benchmark"))?;
179     }
180 
181     Ok(())
182 }
183 
184 // Updates `ConsoleTestState` depending on result of the test execution.
handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest)185 fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) {
186     let test = completed_test.desc;
187     let stdout = completed_test.stdout;
188     match completed_test.result {
189         TestResult::TrOk => {
190             st.passed += 1;
191             st.not_failures.push((test, stdout));
192         }
193         TestResult::TrIgnored => st.ignored += 1,
194         TestResult::TrAllowedFail => st.allowed_fail += 1,
195         TestResult::TrBench(bs) => {
196             st.metrics.insert_metric(
197                 test.name.as_slice(),
198                 bs.ns_iter_summ.median,
199                 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
200             );
201             st.measured += 1
202         }
203         TestResult::TrFailed => {
204             st.failed += 1;
205             st.failures.push((test, stdout));
206         }
207         TestResult::TrFailedMsg(msg) => {
208             st.failed += 1;
209             let mut stdout = stdout;
210             stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
211             st.failures.push((test, stdout));
212         }
213         TestResult::TrTimedFail => {
214             st.failed += 1;
215             st.time_failures.push((test, stdout));
216         }
217     }
218 }
219 
220 // Handler for events that occur during test execution.
221 // It is provided as a callback to the `run_tests` function.
on_test_event( event: &TestEvent, st: &mut ConsoleTestState, out: &mut dyn OutputFormatter, ) -> io::Result<()>222 fn on_test_event(
223     event: &TestEvent,
224     st: &mut ConsoleTestState,
225     out: &mut dyn OutputFormatter,
226 ) -> io::Result<()> {
227     match (*event).clone() {
228         TestEvent::TeFiltered(ref filtered_tests) => {
229             st.total = filtered_tests.len();
230             out.write_run_start(filtered_tests.len())?;
231         }
232         TestEvent::TeFilteredOut(filtered_out) => {
233             st.filtered_out = filtered_out;
234         }
235         TestEvent::TeWait(ref test) => out.write_test_start(test)?,
236         TestEvent::TeTimeout(ref test) => out.write_timeout(test)?,
237         TestEvent::TeResult(completed_test) => {
238             let test = &completed_test.desc;
239             let result = &completed_test.result;
240             let exec_time = &completed_test.exec_time;
241             let stdout = &completed_test.stdout;
242 
243             st.write_log_result(test, result, exec_time.as_ref())?;
244             out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?;
245             handle_test_result(st, completed_test);
246         }
247     }
248 
249     Ok(())
250 }
251 
252 /// A simple console test runner.
253 /// Runs provided tests reporting process and results to the stdout.
run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool>254 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
255     let output = match term::stdout() {
256         None => OutputLocation::Raw(io::stdout()),
257         Some(t) => OutputLocation::Pretty(t),
258     };
259 
260     let max_name_len = tests
261         .iter()
262         .max_by_key(|t| len_if_padded(*t))
263         .map(|t| t.desc.name.as_slice().len())
264         .unwrap_or(0);
265 
266     let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
267 
268     let mut out: Box<dyn OutputFormatter> = match opts.format {
269         OutputFormat::Pretty => Box::new(PrettyFormatter::new(
270             output,
271             opts.use_color(),
272             max_name_len,
273             is_multithreaded,
274             opts.time_options,
275         )),
276         OutputFormat::Terse => {
277             Box::new(TerseFormatter::new(output, opts.use_color(), max_name_len, is_multithreaded))
278         }
279         OutputFormat::Json => Box::new(JsonFormatter::new(output)),
280     };
281     let mut st = ConsoleTestState::new(opts)?;
282 
283     // Prevent the usage of `Instant` in some cases:
284     // - It's currently not supported for wasm targets.
285     // - We disable it for miri because it's not available when isolation is enabled.
286     let is_instant_supported = !cfg!(target_arch = "wasm32") && !cfg!(miri);
287 
288     let start_time = if is_instant_supported { Some(Instant::now()) } else { None };
289     run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
290     st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed()));
291 
292     assert!(st.current_test_count() == st.total);
293 
294     out.write_run_finish(&st)
295 }
296 
297 // Calculates padding for given test description.
len_if_padded(t: &TestDescAndFn) -> usize298 fn len_if_padded(t: &TestDescAndFn) -> usize {
299     match t.testfn.padding() {
300         NamePadding::PadNone => 0,
301         NamePadding::PadOnRight => t.desc.name.as_slice().len(),
302     }
303 }
304