]> git.proxmox.com Git - rustc.git/blob - src/libtest/lib.rs
New upstream version 1.33.0+dfsg1
[rustc.git] / src / libtest / lib.rs
1 //! Support code for rustc's built in unit-test and micro-benchmarking
2 //! framework.
3 //!
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
8 //!
9 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
10
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
14 // build off of.
15
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
19
20 #![crate_name = "test"]
21 #![unstable(feature = "test", issue = "27812")]
22 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
23 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
24 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
25 #![feature(asm)]
26 #![cfg_attr(stage0, feature(cfg_target_vendor))]
27 #![feature(fnbox)]
28 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
29 #![feature(nll)]
30 #![feature(set_stdio)]
31 #![feature(panic_unwind)]
32 #![feature(staged_api)]
33 #![feature(termination_trait_lib)]
34 #![feature(test)]
35
36 extern crate getopts;
37 #[cfg(any(unix, target_os = "cloudabi"))]
38 extern crate libc;
39 extern crate term;
40
41 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
42 // on aarch64-pc-windows-msvc, so we don't link libtest against
43 // libunwind (for the time being), even though it means that
44 // libtest won't be fully functional on this platform.
45 //
46 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
47 #[cfg(not(all(windows, target_arch = "aarch64")))]
48 extern crate panic_unwind;
49
50 pub use self::TestFn::*;
51 pub use self::ColorConfig::*;
52 pub use self::TestResult::*;
53 pub use self::TestName::*;
54 use self::TestEvent::*;
55 use self::NamePadding::*;
56 use self::OutputLocation::*;
57
58 use std::panic::{catch_unwind, AssertUnwindSafe};
59 use std::any::Any;
60 use std::boxed::FnBox;
61 use std::cmp;
62 use std::collections::BTreeMap;
63 use std::env;
64 use std::fmt;
65 use std::fs::File;
66 use std::io::prelude::*;
67 use std::io;
68 use std::path::PathBuf;
69 use std::process::Termination;
70 use std::sync::mpsc::{channel, Sender};
71 use std::sync::{Arc, Mutex};
72 use std::thread;
73 use std::time::{Duration, Instant};
74 use std::borrow::Cow;
75 use std::process;
76
77 const TEST_WARN_TIMEOUT_S: u64 = 60;
78 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
79
80 // to be used by rustc to compile tests in libtest
81 pub mod test {
82 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
83 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
84 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
85 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
86 }
87
88 pub mod stats;
89 mod formatters;
90
91 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
92
93 /// Whether to execute tests concurrently or not
94 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
95 pub enum Concurrent { Yes, No }
96
97 // The name of a test. By convention this follows the rules for rust
98 // paths; i.e., it should be a series of identifiers separated by double
99 // colons. This way if some test runner wants to arrange the tests
100 // hierarchically it may.
101
102 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
103 pub enum TestName {
104 StaticTestName(&'static str),
105 DynTestName(String),
106 AlignedTestName(Cow<'static, str>, NamePadding),
107 }
108 impl TestName {
109 fn as_slice(&self) -> &str {
110 match *self {
111 StaticTestName(s) => s,
112 DynTestName(ref s) => s,
113 AlignedTestName(ref s, _) => &*s,
114 }
115 }
116
117 fn padding(&self) -> NamePadding {
118 match self {
119 &AlignedTestName(_, p) => p,
120 _ => PadNone,
121 }
122 }
123
124 fn with_padding(&self, padding: NamePadding) -> TestName {
125 let name = match self {
126 &TestName::StaticTestName(name) => Cow::Borrowed(name),
127 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
128 &TestName::AlignedTestName(ref name, _) => name.clone(),
129 };
130
131 TestName::AlignedTestName(name, padding)
132 }
133 }
134 impl fmt::Display for TestName {
135 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
136 fmt::Display::fmt(self.as_slice(), f)
137 }
138 }
139
140 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
141 pub enum NamePadding {
142 PadNone,
143 PadOnRight,
144 }
145
146 impl TestDesc {
147 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
148 let mut name = String::from(self.name.as_slice());
149 let fill = column_count.saturating_sub(name.len());
150 let pad = " ".repeat(fill);
151 match align {
152 PadNone => name,
153 PadOnRight => {
154 name.push_str(&pad);
155 name
156 }
157 }
158 }
159 }
160
161 /// Represents a benchmark function.
162 pub trait TDynBenchFn: Send {
163 fn run(&self, harness: &mut Bencher);
164 }
165
166 // A function that runs a test. If the function returns successfully,
167 // the test succeeds; if the function panics then the test fails. We
168 // may need to come up with a more clever definition of test in order
169 // to support isolation of tests into threads.
170 pub enum TestFn {
171 StaticTestFn(fn()),
172 StaticBenchFn(fn(&mut Bencher)),
173 DynTestFn(Box<dyn FnBox() + Send>),
174 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
175 }
176
177 impl TestFn {
178 fn padding(&self) -> NamePadding {
179 match *self {
180 StaticTestFn(..) => PadNone,
181 StaticBenchFn(..) => PadOnRight,
182 DynTestFn(..) => PadNone,
183 DynBenchFn(..) => PadOnRight,
184 }
185 }
186 }
187
188 impl fmt::Debug for TestFn {
189 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
190 f.write_str(match *self {
191 StaticTestFn(..) => "StaticTestFn(..)",
192 StaticBenchFn(..) => "StaticBenchFn(..)",
193 DynTestFn(..) => "DynTestFn(..)",
194 DynBenchFn(..) => "DynBenchFn(..)",
195 })
196 }
197 }
198
199 /// Manager of the benchmarking runs.
200 ///
201 /// This is fed into functions marked with `#[bench]` to allow for
202 /// set-up & tear-down before running a piece of code repeatedly via a
203 /// call to `iter`.
204 #[derive(Clone)]
205 pub struct Bencher {
206 mode: BenchMode,
207 summary: Option<stats::Summary>,
208 pub bytes: u64,
209 }
210
211 #[derive(Clone, PartialEq, Eq)]
212 pub enum BenchMode {
213 Auto,
214 Single,
215 }
216
217 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
218 pub enum ShouldPanic {
219 No,
220 Yes,
221 YesWithMessage(&'static str),
222 }
223
224 // The definition of a single test. A test runner will run a list of
225 // these.
226 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
227 pub struct TestDesc {
228 pub name: TestName,
229 pub ignore: bool,
230 pub should_panic: ShouldPanic,
231 pub allow_fail: bool,
232 }
233
234 #[derive(Debug)]
235 pub struct TestDescAndFn {
236 pub desc: TestDesc,
237 pub testfn: TestFn,
238 }
239
240 #[derive(Clone, PartialEq, Debug, Copy)]
241 pub struct Metric {
242 value: f64,
243 noise: f64,
244 }
245
246 impl Metric {
247 pub fn new(value: f64, noise: f64) -> Metric {
248 Metric { value, noise }
249 }
250 }
251
252 /// In case we want to add other options as well, just add them in this struct.
253 #[derive(Copy, Clone, Debug)]
254 pub struct Options {
255 display_output: bool,
256 }
257
258 impl Options {
259 pub fn new() -> Options {
260 Options {
261 display_output: false,
262 }
263 }
264
265 pub fn display_output(mut self, display_output: bool) -> Options {
266 self.display_output = display_output;
267 self
268 }
269 }
270
271 // The default console test runner. It accepts the command line
272 // arguments and a vector of test_descs.
273 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
274 let mut opts = match parse_opts(args) {
275 Some(Ok(o)) => o,
276 Some(Err(msg)) => {
277 eprintln!("error: {}", msg);
278 process::exit(101);
279 }
280 None => return,
281 };
282
283 opts.options = options;
284 if opts.list {
285 if let Err(e) = list_tests_console(&opts, tests) {
286 eprintln!("error: io error when listing tests: {:?}", e);
287 process::exit(101);
288 }
289 } else {
290 match run_tests_console(&opts, tests) {
291 Ok(true) => {}
292 Ok(false) => process::exit(101),
293 Err(e) => {
294 eprintln!("error: io error when listing tests: {:?}", e);
295 process::exit(101);
296 }
297 }
298 }
299 }
300
301 // A variant optimized for invocation with a static test vector.
302 // This will panic (intentionally) when fed any dynamic tests, because
303 // it is copying the static values out into a dynamic vector and cannot
304 // copy dynamic values. It is doing this because from this point on
305 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
306 // semantics into parallel test runners, which in turn requires a Vec<>
307 // rather than a &[].
308 pub fn test_main_static(tests: &[&TestDescAndFn]) {
309 let args = env::args().collect::<Vec<_>>();
310 let owned_tests = tests
311 .iter()
312 .map(|t| match t.testfn {
313 StaticTestFn(f) => TestDescAndFn {
314 testfn: StaticTestFn(f),
315 desc: t.desc.clone(),
316 },
317 StaticBenchFn(f) => TestDescAndFn {
318 testfn: StaticBenchFn(f),
319 desc: t.desc.clone(),
320 },
321 _ => panic!("non-static tests passed to test::test_main_static"),
322 })
323 .collect();
324 test_main(&args, owned_tests, Options::new())
325 }
326
327 /// Invoked when unit tests terminate. Should panic if the unit
328 /// test is considered a failure. By default, invokes `report()`
329 /// and checks for a `0` result.
330 pub fn assert_test_result<T: Termination>(result: T) {
331 let code = result.report();
332 assert_eq!(
333 code,
334 0,
335 "the test returned a termination value with a non-zero status code ({}) \
336 which indicates a failure",
337 code
338 );
339 }
340
341 #[derive(Copy, Clone, Debug)]
342 pub enum ColorConfig {
343 AutoColor,
344 AlwaysColor,
345 NeverColor,
346 }
347
348 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
349 pub enum OutputFormat {
350 Pretty,
351 Terse,
352 Json,
353 }
354
355 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
356 pub enum RunIgnored {
357 Yes,
358 No,
359 Only,
360 }
361
362 #[derive(Debug)]
363 pub struct TestOpts {
364 pub list: bool,
365 pub filter: Option<String>,
366 pub filter_exact: bool,
367 pub run_ignored: RunIgnored,
368 pub run_tests: bool,
369 pub bench_benchmarks: bool,
370 pub logfile: Option<PathBuf>,
371 pub nocapture: bool,
372 pub color: ColorConfig,
373 pub format: OutputFormat,
374 pub test_threads: Option<usize>,
375 pub skip: Vec<String>,
376 pub options: Options,
377 }
378
379 impl TestOpts {
380 #[cfg(test)]
381 fn new() -> TestOpts {
382 TestOpts {
383 list: false,
384 filter: None,
385 filter_exact: false,
386 run_ignored: RunIgnored::No,
387 run_tests: false,
388 bench_benchmarks: false,
389 logfile: None,
390 nocapture: false,
391 color: AutoColor,
392 format: OutputFormat::Pretty,
393 test_threads: None,
394 skip: vec![],
395 options: Options::new(),
396 }
397 }
398 }
399
400 /// Result of parsing the options.
401 pub type OptRes = Result<TestOpts, String>;
402
403 fn optgroups() -> getopts::Options {
404 let mut opts = getopts::Options::new();
405 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
406 .optflag("", "ignored", "Run only ignored tests")
407 .optflag("", "test", "Run tests and not benchmarks")
408 .optflag("", "bench", "Run benchmarks instead of tests")
409 .optflag("", "list", "List all tests and benchmarks")
410 .optflag("h", "help", "Display this message (longer with --help)")
411 .optopt(
412 "",
413 "logfile",
414 "Write logs to the specified file instead \
415 of stdout",
416 "PATH",
417 )
418 .optflag(
419 "",
420 "nocapture",
421 "don't capture stdout/stderr of each \
422 task, allow printing directly",
423 )
424 .optopt(
425 "",
426 "test-threads",
427 "Number of threads used for running tests \
428 in parallel",
429 "n_threads",
430 )
431 .optmulti(
432 "",
433 "skip",
434 "Skip tests whose names contain FILTER (this flag can \
435 be used multiple times)",
436 "FILTER",
437 )
438 .optflag(
439 "q",
440 "quiet",
441 "Display one character per test instead of one line. \
442 Alias to --format=terse",
443 )
444 .optflag(
445 "",
446 "exact",
447 "Exactly match filters rather than by substring",
448 )
449 .optopt(
450 "",
451 "color",
452 "Configure coloring of output:
453 auto = colorize if stdout is a tty and tests are run on serially (default);
454 always = always colorize output;
455 never = never colorize output;",
456 "auto|always|never",
457 )
458 .optopt(
459 "",
460 "format",
461 "Configure formatting of output:
462 pretty = Print verbose output;
463 terse = Display one character per test;
464 json = Output a json document",
465 "pretty|terse|json",
466 )
467 .optopt(
468 "Z",
469 "",
470 "Enable nightly-only flags:
471 unstable-options = Allow use of experimental features",
472 "unstable-options",
473 );
474 return opts;
475 }
476
477 fn usage(binary: &str, options: &getopts::Options) {
478 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
479 println!(
480 r#"{usage}
481
482 The FILTER string is tested against the name of all tests, and only those
483 tests whose names contain the filter are run.
484
485 By default, all tests are run in parallel. This can be altered with the
486 --test-threads flag or the RUST_TEST_THREADS environment variable when running
487 tests (set it to 1).
488
489 All tests have their standard output and standard error captured by default.
490 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
491 environment variable to a value other than "0". Logging is not captured by default.
492
493 Test Attributes:
494
495 #[test] - Indicates a function is a test to be run. This function
496 takes no arguments.
497 #[bench] - Indicates a function is a benchmark to be run. This
498 function takes one argument (test::Bencher).
499 #[should_panic] - This function (also labeled with #[test]) will only pass if
500 the code causes a panic (an assertion failure or panic!)
501 A message may be provided, which the failure string must
502 contain: #[should_panic(expected = "foo")].
503 #[ignore] - When applied to a function which is already attributed as a
504 test, then the test runner will ignore these tests during
505 normal test runs. Running with --ignored or --include-ignored will run
506 these tests."#,
507 usage = options.usage(&message)
508 );
509 }
510
511 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
512 fn is_nightly() -> bool {
513 // Whether this is a feature-staged build, i.e., on the beta or stable channel
514 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
515 // Whether we should enable unstable features for bootstrapping
516 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
517
518 bootstrap || !disable_unstable_features
519 }
520
521 // Parses command line arguments into test options
522 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
523 let mut allow_unstable = false;
524 let opts = optgroups();
525 let args = args.get(1..).unwrap_or(args);
526 let matches = match opts.parse(args) {
527 Ok(m) => m,
528 Err(f) => return Some(Err(f.to_string())),
529 };
530
531 if let Some(opt) = matches.opt_str("Z") {
532 if !is_nightly() {
533 return Some(Err(
534 "the option `Z` is only accepted on the nightly compiler".into(),
535 ));
536 }
537
538 match &*opt {
539 "unstable-options" => {
540 allow_unstable = true;
541 }
542 _ => {
543 return Some(Err("Unrecognized option to `Z`".into()));
544 }
545 }
546 };
547
548 if matches.opt_present("h") {
549 usage(&args[0], &opts);
550 return None;
551 }
552
553 let filter = if !matches.free.is_empty() {
554 Some(matches.free[0].clone())
555 } else {
556 None
557 };
558
559 let include_ignored = matches.opt_present("include-ignored");
560 if !allow_unstable && include_ignored {
561 return Some(Err(
562 "The \"include-ignored\" flag is only accepted on the nightly compiler".into()
563 ));
564 }
565
566 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
567 (true, true) => return Some(Err(
568 "the options --include-ignored and --ignored are mutually exclusive".into()
569 )),
570 (true, false) => RunIgnored::Yes,
571 (false, true) => RunIgnored::Only,
572 (false, false) => RunIgnored::No,
573 };
574 let quiet = matches.opt_present("quiet");
575 let exact = matches.opt_present("exact");
576 let list = matches.opt_present("list");
577
578 let logfile = matches.opt_str("logfile");
579 let logfile = logfile.map(|s| PathBuf::from(&s));
580
581 let bench_benchmarks = matches.opt_present("bench");
582 let run_tests = !bench_benchmarks || matches.opt_present("test");
583
584 let mut nocapture = matches.opt_present("nocapture");
585 if !nocapture {
586 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
587 Ok(val) => &val != "0",
588 Err(_) => false,
589 };
590 }
591
592 let test_threads = match matches.opt_str("test-threads") {
593 Some(n_str) => match n_str.parse::<usize>() {
594 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
595 Ok(n) => Some(n),
596 Err(e) => {
597 return Some(Err(format!(
598 "argument for --test-threads must be a number > 0 \
599 (error: {})",
600 e
601 )))
602 }
603 },
604 None => None,
605 };
606
607 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
608 Some("auto") | None => AutoColor,
609 Some("always") => AlwaysColor,
610 Some("never") => NeverColor,
611
612 Some(v) => {
613 return Some(Err(format!(
614 "argument for --color must be auto, always, or never (was \
615 {})",
616 v
617 )))
618 }
619 };
620
621 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
622 None if quiet => OutputFormat::Terse,
623 Some("pretty") | None => OutputFormat::Pretty,
624 Some("terse") => OutputFormat::Terse,
625 Some("json") => {
626 if !allow_unstable {
627 return Some(Err(
628 "The \"json\" format is only accepted on the nightly compiler".into(),
629 ));
630 }
631 OutputFormat::Json
632 }
633
634 Some(v) => {
635 return Some(Err(format!(
636 "argument for --format must be pretty, terse, or json (was \
637 {})",
638 v
639 )))
640 }
641 };
642
643 let test_opts = TestOpts {
644 list,
645 filter,
646 filter_exact: exact,
647 run_ignored,
648 run_tests,
649 bench_benchmarks,
650 logfile,
651 nocapture,
652 color,
653 format,
654 test_threads,
655 skip: matches.opt_strs("skip"),
656 options: Options::new(),
657 };
658
659 Some(Ok(test_opts))
660 }
661
662 #[derive(Clone, PartialEq)]
663 pub struct BenchSamples {
664 ns_iter_summ: stats::Summary,
665 mb_s: usize,
666 }
667
668 #[derive(Clone, PartialEq)]
669 pub enum TestResult {
670 TrOk,
671 TrFailed,
672 TrFailedMsg(String),
673 TrIgnored,
674 TrAllowedFail,
675 TrBench(BenchSamples),
676 }
677
678 unsafe impl Send for TestResult {}
679
680 enum OutputLocation<T> {
681 Pretty(Box<term::StdoutTerminal>),
682 Raw(T),
683 }
684
685 impl<T: Write> Write for OutputLocation<T> {
686 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
687 match *self {
688 Pretty(ref mut term) => term.write(buf),
689 Raw(ref mut stdout) => stdout.write(buf),
690 }
691 }
692
693 fn flush(&mut self) -> io::Result<()> {
694 match *self {
695 Pretty(ref mut term) => term.flush(),
696 Raw(ref mut stdout) => stdout.flush(),
697 }
698 }
699 }
700
701 struct ConsoleTestState {
702 log_out: Option<File>,
703 total: usize,
704 passed: usize,
705 failed: usize,
706 ignored: usize,
707 allowed_fail: usize,
708 filtered_out: usize,
709 measured: usize,
710 metrics: MetricMap,
711 failures: Vec<(TestDesc, Vec<u8>)>,
712 not_failures: Vec<(TestDesc, Vec<u8>)>,
713 options: Options,
714 }
715
716 impl ConsoleTestState {
717 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
718 let log_out = match opts.logfile {
719 Some(ref path) => Some(File::create(path)?),
720 None => None,
721 };
722
723 Ok(ConsoleTestState {
724 log_out,
725 total: 0,
726 passed: 0,
727 failed: 0,
728 ignored: 0,
729 allowed_fail: 0,
730 filtered_out: 0,
731 measured: 0,
732 metrics: MetricMap::new(),
733 failures: Vec::new(),
734 not_failures: Vec::new(),
735 options: opts.options,
736 })
737 }
738
739 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
740 let msg = msg.as_ref();
741 match self.log_out {
742 None => Ok(()),
743 Some(ref mut o) => o.write_all(msg.as_bytes()),
744 }
745 }
746
747 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
748 self.write_log(format!(
749 "{} {}\n",
750 match *result {
751 TrOk => "ok".to_owned(),
752 TrFailed => "failed".to_owned(),
753 TrFailedMsg(ref msg) => format!("failed: {}", msg),
754 TrIgnored => "ignored".to_owned(),
755 TrAllowedFail => "failed (allowed)".to_owned(),
756 TrBench(ref bs) => fmt_bench_samples(bs),
757 },
758 test.name
759 ))
760 }
761
762 fn current_test_count(&self) -> usize {
763 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
764 }
765 }
766
767 // Format a number with thousands separators
768 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
769 use std::fmt::Write;
770 let mut output = String::new();
771 let mut trailing = false;
772 for &pow in &[9, 6, 3, 0] {
773 let base = 10_usize.pow(pow);
774 if pow == 0 || trailing || n / base != 0 {
775 if !trailing {
776 output.write_fmt(format_args!("{}", n / base)).unwrap();
777 } else {
778 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
779 }
780 if pow != 0 {
781 output.push(sep);
782 }
783 trailing = true;
784 }
785 n %= base;
786 }
787
788 output
789 }
790
791 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
792 use std::fmt::Write;
793 let mut output = String::new();
794
795 let median = bs.ns_iter_summ.median as usize;
796 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
797
798 output
799 .write_fmt(format_args!(
800 "{:>11} ns/iter (+/- {})",
801 fmt_thousands_sep(median, ','),
802 fmt_thousands_sep(deviation, ',')
803 ))
804 .unwrap();
805 if bs.mb_s != 0 {
806 output
807 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
808 .unwrap();
809 }
810 output
811 }
812
813 // List the tests to console, and optionally to logfile. Filters are honored.
814 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
815 let mut output = match term::stdout() {
816 None => Raw(io::stdout()),
817 Some(t) => Pretty(t),
818 };
819
820 let quiet = opts.format == OutputFormat::Terse;
821 let mut st = ConsoleTestState::new(opts)?;
822
823 let mut ntest = 0;
824 let mut nbench = 0;
825
826 for test in filter_tests(&opts, tests) {
827 use TestFn::*;
828
829 let TestDescAndFn {
830 desc: TestDesc { name, .. },
831 testfn,
832 } = test;
833
834 let fntype = match testfn {
835 StaticTestFn(..) | DynTestFn(..) => {
836 ntest += 1;
837 "test"
838 }
839 StaticBenchFn(..) | DynBenchFn(..) => {
840 nbench += 1;
841 "benchmark"
842 }
843 };
844
845 writeln!(output, "{}: {}", name, fntype)?;
846 st.write_log(format!("{} {}\n", fntype, name))?;
847 }
848
849 fn plural(count: u32, s: &str) -> String {
850 match count {
851 1 => format!("{} {}", 1, s),
852 n => format!("{} {}s", n, s),
853 }
854 }
855
856 if !quiet {
857 if ntest != 0 || nbench != 0 {
858 writeln!(output, "")?;
859 }
860
861 writeln!(
862 output,
863 "{}, {}",
864 plural(ntest, "test"),
865 plural(nbench, "benchmark")
866 )?;
867 }
868
869 Ok(())
870 }
871
872 // A simple console test runner
873 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
874 fn callback(
875 event: &TestEvent,
876 st: &mut ConsoleTestState,
877 out: &mut dyn OutputFormatter,
878 ) -> io::Result<()> {
879 match (*event).clone() {
880 TeFiltered(ref filtered_tests) => {
881 st.total = filtered_tests.len();
882 out.write_run_start(filtered_tests.len())
883 }
884 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
885 TeWait(ref test) => out.write_test_start(test),
886 TeTimeout(ref test) => out.write_timeout(test),
887 TeResult(test, result, stdout) => {
888 st.write_log_result(&test, &result)?;
889 out.write_result(&test, &result, &*stdout)?;
890 match result {
891 TrOk => {
892 st.passed += 1;
893 st.not_failures.push((test, stdout));
894 }
895 TrIgnored => st.ignored += 1,
896 TrAllowedFail => st.allowed_fail += 1,
897 TrBench(bs) => {
898 st.metrics.insert_metric(
899 test.name.as_slice(),
900 bs.ns_iter_summ.median,
901 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
902 );
903 st.measured += 1
904 }
905 TrFailed => {
906 st.failed += 1;
907 st.failures.push((test, stdout));
908 }
909 TrFailedMsg(msg) => {
910 st.failed += 1;
911 let mut stdout = stdout;
912 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
913 st.failures.push((test, stdout));
914 }
915 }
916 Ok(())
917 }
918 }
919 }
920
921 let output = match term::stdout() {
922 None => Raw(io::stdout()),
923 Some(t) => Pretty(t),
924 };
925
926 let max_name_len = tests
927 .iter()
928 .max_by_key(|t| len_if_padded(*t))
929 .map(|t| t.desc.name.as_slice().len())
930 .unwrap_or(0);
931
932 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
933
934 let mut out: Box<dyn OutputFormatter> = match opts.format {
935 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
936 output,
937 use_color(opts),
938 max_name_len,
939 is_multithreaded,
940 )),
941 OutputFormat::Terse => Box::new(TerseFormatter::new(
942 output,
943 use_color(opts),
944 max_name_len,
945 is_multithreaded,
946 )),
947 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
948 };
949 let mut st = ConsoleTestState::new(opts)?;
950 fn len_if_padded(t: &TestDescAndFn) -> usize {
951 match t.testfn.padding() {
952 PadNone => 0,
953 PadOnRight => t.desc.name.as_slice().len(),
954 }
955 }
956
957 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
958
959 assert!(st.current_test_count() == st.total);
960
961 return out.write_run_finish(&st);
962 }
963
964 #[test]
965 fn should_sort_failures_before_printing_them() {
966 let test_a = TestDesc {
967 name: StaticTestName("a"),
968 ignore: false,
969 should_panic: ShouldPanic::No,
970 allow_fail: false,
971 };
972
973 let test_b = TestDesc {
974 name: StaticTestName("b"),
975 ignore: false,
976 should_panic: ShouldPanic::No,
977 allow_fail: false,
978 };
979
980 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
981
982 let st = ConsoleTestState {
983 log_out: None,
984 total: 0,
985 passed: 0,
986 failed: 0,
987 ignored: 0,
988 allowed_fail: 0,
989 filtered_out: 0,
990 measured: 0,
991 metrics: MetricMap::new(),
992 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
993 options: Options::new(),
994 not_failures: Vec::new(),
995 };
996
997 out.write_failures(&st).unwrap();
998 let s = match out.output_location() {
999 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1000 &Pretty(_) => unreachable!(),
1001 };
1002
1003 let apos = s.find("a").unwrap();
1004 let bpos = s.find("b").unwrap();
1005 assert!(apos < bpos);
1006 }
1007
1008 fn use_color(opts: &TestOpts) -> bool {
1009 match opts.color {
1010 AutoColor => !opts.nocapture && stdout_isatty(),
1011 AlwaysColor => true,
1012 NeverColor => false,
1013 }
1014 }
1015
1016 #[cfg(any(target_os = "cloudabi",
1017 target_os = "redox",
1018 all(target_arch = "wasm32", not(target_os = "emscripten")),
1019 all(target_vendor = "fortanix", target_env = "sgx")))]
1020 fn stdout_isatty() -> bool {
1021 // FIXME: Implement isatty on Redox and SGX
1022 false
1023 }
1024 #[cfg(unix)]
1025 fn stdout_isatty() -> bool {
1026 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1027 }
1028 #[cfg(windows)]
1029 fn stdout_isatty() -> bool {
1030 type DWORD = u32;
1031 type BOOL = i32;
1032 type HANDLE = *mut u8;
1033 type LPDWORD = *mut u32;
1034 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1035 extern "system" {
1036 fn GetStdHandle(which: DWORD) -> HANDLE;
1037 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1038 }
1039 unsafe {
1040 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1041 let mut out = 0;
1042 GetConsoleMode(handle, &mut out) != 0
1043 }
1044 }
1045
1046 #[derive(Clone)]
1047 pub enum TestEvent {
1048 TeFiltered(Vec<TestDesc>),
1049 TeWait(TestDesc),
1050 TeResult(TestDesc, TestResult, Vec<u8>),
1051 TeTimeout(TestDesc),
1052 TeFilteredOut(usize),
1053 }
1054
1055 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
1056
1057 struct Sink(Arc<Mutex<Vec<u8>>>);
1058 impl Write for Sink {
1059 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1060 Write::write(&mut *self.0.lock().unwrap(), data)
1061 }
1062 fn flush(&mut self) -> io::Result<()> {
1063 Ok(())
1064 }
1065 }
1066
1067 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1068 where
1069 F: FnMut(TestEvent) -> io::Result<()>,
1070 {
1071 use std::collections::{self, HashMap};
1072 use std::hash::BuildHasherDefault;
1073 use std::sync::mpsc::RecvTimeoutError;
1074 // Use a deterministic hasher
1075 type TestMap =
1076 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1077
1078 let tests_len = tests.len();
1079
1080 let mut filtered_tests = filter_tests(opts, tests);
1081 if !opts.bench_benchmarks {
1082 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1083 }
1084
1085 let filtered_tests = {
1086 let mut filtered_tests = filtered_tests;
1087 for test in filtered_tests.iter_mut() {
1088 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1089 }
1090
1091 filtered_tests
1092 };
1093
1094 let filtered_out = tests_len - filtered_tests.len();
1095 callback(TeFilteredOut(filtered_out))?;
1096
1097 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1098
1099 callback(TeFiltered(filtered_descs))?;
1100
1101 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1102 filtered_tests.into_iter().partition(|e| match e.testfn {
1103 StaticTestFn(_) | DynTestFn(_) => true,
1104 _ => false,
1105 });
1106
1107 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1108
1109 let mut remaining = filtered_tests;
1110 remaining.reverse();
1111 let mut pending = 0;
1112
1113 let (tx, rx) = channel::<MonitorMsg>();
1114
1115 let mut running_tests: TestMap = HashMap::default();
1116
1117 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1118 let now = Instant::now();
1119 let timed_out = running_tests
1120 .iter()
1121 .filter_map(|(desc, timeout)| {
1122 if &now >= timeout {
1123 Some(desc.clone())
1124 } else {
1125 None
1126 }
1127 })
1128 .collect();
1129 for test in &timed_out {
1130 running_tests.remove(test);
1131 }
1132 timed_out
1133 };
1134
1135 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1136 running_tests.values().min().map(|next_timeout| {
1137 let now = Instant::now();
1138 if *next_timeout >= now {
1139 *next_timeout - now
1140 } else {
1141 Duration::new(0, 0)
1142 }
1143 })
1144 };
1145
1146 if concurrency == 1 {
1147 while !remaining.is_empty() {
1148 let test = remaining.pop().unwrap();
1149 callback(TeWait(test.desc.clone()))?;
1150 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1151 let (test, result, stdout) = rx.recv().unwrap();
1152 callback(TeResult(test, result, stdout))?;
1153 }
1154 } else {
1155 while pending > 0 || !remaining.is_empty() {
1156 while pending < concurrency && !remaining.is_empty() {
1157 let test = remaining.pop().unwrap();
1158 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1159 running_tests.insert(test.desc.clone(), timeout);
1160 callback(TeWait(test.desc.clone()))?; //here no pad
1161 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1162 pending += 1;
1163 }
1164
1165 let mut res;
1166 loop {
1167 if let Some(timeout) = calc_timeout(&running_tests) {
1168 res = rx.recv_timeout(timeout);
1169 for test in get_timed_out_tests(&mut running_tests) {
1170 callback(TeTimeout(test))?;
1171 }
1172 if res != Err(RecvTimeoutError::Timeout) {
1173 break;
1174 }
1175 } else {
1176 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1177 break;
1178 }
1179 }
1180
1181 let (desc, result, stdout) = res.unwrap();
1182 running_tests.remove(&desc);
1183
1184 callback(TeResult(desc, result, stdout))?;
1185 pending -= 1;
1186 }
1187 }
1188
1189 if opts.bench_benchmarks {
1190 // All benchmarks run at the end, in serial.
1191 for b in filtered_benchs {
1192 callback(TeWait(b.desc.clone()))?;
1193 run_test(opts, false, b, tx.clone(), Concurrent::No);
1194 let (test, result, stdout) = rx.recv().unwrap();
1195 callback(TeResult(test, result, stdout))?;
1196 }
1197 }
1198 Ok(())
1199 }
1200
/// Determines how many tests may run concurrently: the value of the
/// `RUST_TEST_THREADS` environment variable when set (panicking if it is
/// not a positive integer), otherwise the number of online CPUs as
/// reported by the platform-specific `num_cpus` helpers below.
#[allow(deprecated)]
fn get_concurrency() -> usize {
    return match env::var("RUST_TEST_THREADS") {
        Ok(s) => {
            let opt_n: Option<usize> = s.parse().ok();
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!(
                    "RUST_TEST_THREADS is `{}`, should be a positive integer.",
                    s
                ),
            }
        }
        Err(..) => num_cpus(),
    };

    // Windows: query `GetSystemInfo` directly via FFI and use the
    // reported processor count.
    #[cfg(windows)]
    #[allow(nonstandard_style)]
    fn num_cpus() -> usize {
        // Minimal mirror of the Win32 SYSTEM_INFO layout; only
        // `dwNumberOfProcessors` is actually read.
        #[repr(C)]
        struct SYSTEM_INFO {
            wProcessorArchitecture: u16,
            wReserved: u16,
            dwPageSize: u32,
            lpMinimumApplicationAddress: *mut u8,
            lpMaximumApplicationAddress: *mut u8,
            dwActiveProcessorMask: *mut u8,
            dwNumberOfProcessors: u32,
            dwProcessorType: u32,
            dwAllocationGranularity: u32,
            wProcessorLevel: u16,
            wProcessorRevision: u16,
        }
        extern "system" {
            fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
        }
        unsafe {
            // SAFETY: GetSystemInfo fills the struct; all fields are plain
            // integers/pointers, so a zeroed SYSTEM_INFO is a valid value.
            let mut sysinfo = std::mem::zeroed();
            GetSystemInfo(&mut sysinfo);
            sysinfo.dwNumberOfProcessors as usize
        }
    }

    #[cfg(target_os = "redox")]
    fn num_cpus() -> usize {
        // FIXME: Implement num_cpus on Redox
        1
    }

    // Single-threaded targets: report one CPU.
    #[cfg(any(all(target_arch = "wasm32", not(target_os = "emscripten")),
              all(target_vendor = "fortanix", target_env = "sgx")))]
    fn num_cpus() -> usize {
        1
    }

    // Unix-likes with a reliable sysconf: ask for the number of
    // currently-online processors.
    #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
              target_os = "fuchsia", target_os = "ios", target_os = "linux",
              target_os = "macos", target_os = "solaris"))]
    fn num_cpus() -> usize {
        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
    }

    // BSDs where sysconf may fail: try sysconf first, then fall back to
    // the `hw.ncpu` sysctl, and finally clamp to at least 1.
    #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
              target_os = "netbsd"))]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);

        unsafe {
            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
        }
        if cpus < 1 {
            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
            unsafe {
                // SAFETY: mib names a 2-level sysctl; `cpus`/`cpus_size`
                // describe a correctly sized output buffer.
                libc::sysctl(
                    mib.as_mut_ptr(),
                    2,
                    &mut cpus as *mut _ as *mut _,
                    &mut cpus_size as *mut _ as *mut _,
                    ptr::null_mut(),
                    0,
                );
            }
            if cpus < 1 {
                cpus = 1;
            }
        }
        cpus as usize
    }

    // OpenBSD has no _SC_NPROCESSORS_ONLN; go straight to sysctl.
    #[cfg(target_os = "openbsd")]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];

        unsafe {
            // SAFETY: same contract as the BSD branch above.
            libc::sysctl(
                mib.as_mut_ptr(),
                2,
                &mut cpus as *mut _ as *mut _,
                &mut cpus_size as *mut _ as *mut _,
                ptr::null_mut(),
                0,
            );
        }
        if cpus < 1 {
            cpus = 1;
        }
        cpus as usize
    }

    #[cfg(target_os = "haiku")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }

    #[cfg(target_os = "l4re")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }
}
1329
1330 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1331 let mut filtered = tests;
1332 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1333 let test_name = test.desc.name.as_slice();
1334
1335 match opts.filter_exact {
1336 true => test_name == filter,
1337 false => test_name.contains(filter),
1338 }
1339 };
1340
1341 // Remove tests that don't match the test filter
1342 if let Some(ref filter) = opts.filter {
1343 filtered.retain(|test| matches_filter(test, filter));
1344 }
1345
1346 // Skip tests that match any of the skip filters
1347 filtered.retain(|test| {
1348 !opts.skip.iter().any(|sf| matches_filter(test, sf))
1349 });
1350
1351 // maybe unignore tests
1352 match opts.run_ignored {
1353 RunIgnored::Yes => {
1354 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
1355 },
1356 RunIgnored::Only => {
1357 filtered.retain(|test| test.desc.ignore);
1358 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
1359 }
1360 RunIgnored::No => {}
1361 }
1362
1363 // Sort the tests alphabetically
1364 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1365
1366 filtered
1367 }
1368
1369 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1370 // convert benchmarks to tests, if we're not benchmarking them
1371 tests
1372 .into_iter()
1373 .map(|x| {
1374 let testfn = match x.testfn {
1375 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1376 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1377 })),
1378 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1379 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1380 })),
1381 f => f,
1382 };
1383 TestDescAndFn {
1384 desc: x.desc,
1385 testfn,
1386 }
1387 })
1388 .collect()
1389 }
1390
/// Runs a single test or benchmark, reporting its outcome as a
/// `(desc, result, stdout)` message on `monitor_ch`.
///
/// `force_ignore` short-circuits the test to `TrIgnored` (used for dry
/// runs); `concurrency` decides whether a plain test may be spawned on
/// its own thread.
pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    test: TestDescAndFn,
    monitor_ch: Sender<MonitorMsg>,
    concurrency: Concurrent,
) {
    let TestDescAndFn { desc, testfn } = test;

    // On wasm32 (panic=abort, except under emscripten) a should_panic test
    // would abort the whole process, so it must be skipped.
    let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
        && desc.should_panic != ShouldPanic::No;

    if force_ignore || desc.ignore || ignore_because_panic_abort {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    /// Executes a plain (non-bench) test body with stdout/panic output
    /// captured (unless `nocapture`), then reports the result on
    /// `monitor_ch` — on a fresh thread when `concurrency` allows.
    fn run_test_inner(
        desc: TestDesc,
        monitor_ch: Sender<MonitorMsg>,
        nocapture: bool,
        testfn: Box<dyn FnBox() + Send>,
        concurrency: Concurrent,
    ) {
        // Buffer for capturing standard I/O
        let data = Arc::new(Mutex::new(Vec::new()));
        let data2 = data.clone();

        let name = desc.name.clone();
        let runtest = move || {
            // Redirect print/panic output into the shared buffer,
            // remembering the previous sinks so they can be restored.
            let oldio = if !nocapture {
                Some((
                    io::set_print(Some(Box::new(Sink(data2.clone())))),
                    io::set_panic(Some(Box::new(Sink(data2)))),
                ))
            } else {
                None
            };

            // Run the body; a panic is converted into Err(payload).
            let result = catch_unwind(AssertUnwindSafe(testfn));

            if let Some((printio, panicio)) = oldio {
                io::set_print(printio);
                io::set_panic(panicio);
            };

            let test_result = calc_result(&desc, result);
            let stdout = data.lock().unwrap().to_vec();
            monitor_ch
                .send((desc.clone(), test_result, stdout))
                .unwrap();
        };

        // If the platform is single-threaded we're just going to run
        // the test synchronously, regardless of the concurrency
        // level.
        let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
        if concurrency == Concurrent::Yes && supports_threads {
            let cfg = thread::Builder::new().name(name.as_slice().to_owned());
            cfg.spawn(runtest).unwrap();
        } else {
            runtest();
        }
    }

    match testfn {
        // Benchmarks are handled by the bench harness, always in-process.
        DynBenchFn(bencher) => {
            ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
                bencher.run(harness)
            });
        }
        StaticBenchFn(benchfn) => {
            ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
                (benchfn.clone())(harness)
            });
        }
        // Plain tests are wrapped in the short-backtrace frame and run
        // through run_test_inner.
        DynTestFn(f) => {
            let cb = move || __rust_begin_short_backtrace(f);
            run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
        }
        StaticTestFn(f) => run_test_inner(
            desc,
            monitor_ch,
            opts.nocapture,
            Box::new(move || __rust_begin_short_backtrace(f)),
            concurrency,
        ),
    }
}
1480
/// Fixed, never-inlined frame used as an anchor so that
/// `RUST_BACKTRACE=1` can trim everything below the test body from the
/// printed backtrace.
#[inline(never)]
fn __rust_begin_short_backtrace<F>(f: F)
where
    F: FnOnce(),
{
    f();
}
1486
1487 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1488 match (&desc.should_panic, task_result) {
1489 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1490 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1491 if err.downcast_ref::<String>()
1492 .map(|e| &**e)
1493 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1494 .map(|e| e.contains(msg))
1495 .unwrap_or(false)
1496 {
1497 TrOk
1498 } else {
1499 if desc.allow_fail {
1500 TrAllowedFail
1501 } else {
1502 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1503 }
1504 }
1505 }
1506 _ if desc.allow_fail => TrAllowedFail,
1507 _ => TrFailed,
1508 }
1509 }
1510
/// An ordered collection of named metrics, each a `value` with an
/// associated `noise` tolerance, keyed by metric name.
#[derive(Clone, PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);
1513
1514 impl MetricMap {
1515 pub fn new() -> MetricMap {
1516 MetricMap(BTreeMap::new())
1517 }
1518
1519 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1520 /// must be non-negative. The `noise` indicates the uncertainty of the
1521 /// metric, which doubles as the "noise range" of acceptable
1522 /// pairwise-regressions on this named value, when comparing from one
1523 /// metric to the next using `compare_to_old`.
1524 ///
1525 /// If `noise` is positive, then it means this metric is of a value
1526 /// you want to see grow smaller, so a change larger than `noise` in the
1527 /// positive direction represents a regression.
1528 ///
1529 /// If `noise` is negative, then it means this metric is of a value
1530 /// you want to see grow larger, so a change larger than `noise` in the
1531 /// negative direction represents a regression.
1532 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1533 let m = Metric { value, noise };
1534 self.0.insert(name.to_owned(), m);
1535 }
1536
1537 pub fn fmt_metrics(&self) -> String {
1538 let v = self.0
1539 .iter()
1540 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1541 .collect::<Vec<_>>();
1542 v.join(", ")
1543 }
1544 }
1545
1546 // Benchmarking
1547
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
#[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    // SAFETY: the asm block is empty — it executes no instructions and
    // writes nothing; it merely declares a read of the address of `dummy`
    // ("r" input) so LLVM must assume the value is observed.
    unsafe { asm!("" : : "r"(&dummy)) }
    dummy
}
// Fallback for targets without inline-asm support (asm.js / wasm32):
// rely on `#[inline(never)]` alone to keep the value opaque across the
// call boundary.
#[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T {
    dummy
}
1565
1566 impl Bencher {
1567 /// Callback for benchmark functions to run in their body.
1568 pub fn iter<T, F>(&mut self, mut inner: F)
1569 where
1570 F: FnMut() -> T,
1571 {
1572 if self.mode == BenchMode::Single {
1573 ns_iter_inner(&mut inner, 1);
1574 return;
1575 }
1576
1577 self.summary = Some(iter(&mut inner));
1578 }
1579
1580 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1581 where
1582 F: FnMut(&mut Bencher),
1583 {
1584 f(self);
1585 return self.summary;
1586 }
1587 }
1588
/// Converts a `Duration` into a total count of nanoseconds as a `u64`.
fn ns_from_dur(dur: Duration) -> u64 {
    let whole_secs_as_ns = dur.as_secs() * 1_000_000_000;
    whole_secs_as_ns + u64::from(dur.subsec_nanos())
}
1592
1593 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1594 where
1595 F: FnMut() -> T,
1596 {
1597 let start = Instant::now();
1598 for _ in 0..k {
1599 black_box(inner());
1600 }
1601 return ns_from_dur(start.elapsed());
1602 }
1603
/// Drives the adaptive benchmark loop for one benchmark body and returns
/// summary statistics of nanoseconds-per-iteration, stopping once the
/// median has stabilized (or after a hard 3-second cap).
pub fn iter<T, F>(inner: &mut F) -> stats::Summary
where
    F: FnMut() -> T,
{
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e., larger error bars).
    n = cmp::max(1, n);

    let mut total_run = Duration::new(0, 0);
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    loop {
        let loop_start = Instant::now();

        // Take 50 samples of ns-per-iteration at batch size `n`.
        for p in &mut *samples {
            *p = ns_iter_inner(inner, n) as f64 / n as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ = stats::Summary::new(samples);

        // And another 50 at batch size `5 * n`, to cross-check convergence.
        for p in &mut *samples {
            let ns = ns_iter_inner(inner, 5 * n);
            *p = ns as f64 / (5 * n) as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ5 = stats::Summary::new(samples);

        let loop_run = loop_start.elapsed();

        // If we've run for 100ms and seem to have converged to a
        // stable median.
        if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
            && summ.median - summ5.median < summ5.median_abs_dev
        {
            return summ5;
        }

        total_run = total_run + loop_run;
        // Longest we ever run for is 3s.
        if total_run > Duration::from_secs(3) {
            return summ5;
        }

        // If we overflow here just return the results so far. We check a
        // multiplier of 10 because we're about to multiply by 2 and the
        // next iteration of the loop will also multiply by 5 (to calculate
        // the summ5 result)
        n = match n.checked_mul(10) {
            Some(_) => n * 2,
            None => {
                return summ5;
            }
        };
    }
}
1671
/// Benchmark execution: runs benchmark bodies with captured I/O and
/// reports their results over the monitor channel.
pub mod bench {
    use std::panic::{catch_unwind, AssertUnwindSafe};
    use std::cmp;
    use std::io;
    use std::sync::{Arc, Mutex};
    use stats;
    use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};

    /// Runs the benchmark `f` in full (`Auto`) mode, capturing its output
    /// unless `nocapture` is set, and sends the outcome for `desc` on
    /// `monitor_ch`.
    pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
    where
        F: FnMut(&mut Bencher),
    {
        let mut bs = Bencher {
            mode: BenchMode::Auto,
            summary: None,
            bytes: 0,
        };

        // Buffer that collects everything the benchmark prints.
        let data = Arc::new(Mutex::new(Vec::new()));
        let data2 = data.clone();

        // Redirect print/panic output into the buffer, remembering the
        // previous sinks so they can be restored afterwards.
        let oldio = if !nocapture {
            Some((
                io::set_print(Some(Box::new(Sink(data2.clone())))),
                io::set_panic(Some(Box::new(Sink(data2)))),
            ))
        } else {
            None
        };

        // Run the benchmark; a panic becomes Err(_).
        let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

        if let Some((printio, panicio)) = oldio {
            io::set_print(printio);
            io::set_panic(panicio);
        };

        let test_result = match result {
            Ok(Some(ns_iter_summ)) => {
                // Throughput: bytes processed per iteration, scaled by
                // median ns-per-iteration (clamped to >= 1 to avoid /0).
                let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
                let mb_s = bs.bytes * 1000 / ns_iter;

                let bs = BenchSamples {
                    ns_iter_summ,
                    mb_s: mb_s as usize,
                };
                TestResult::TrBench(bs)
            }
            Ok(None) => {
                // iter not called, so no data.
                // FIXME: error in this case?
                let samples: &mut [f64] = &mut [0.0_f64; 1];
                let bs = BenchSamples {
                    ns_iter_summ: stats::Summary::new(samples),
                    mb_s: 0,
                };
                TestResult::TrBench(bs)
            }
            Err(_) => TestResult::TrFailed,
        };

        let stdout = data.lock().unwrap().to_vec();
        monitor_ch.send((desc, test_result, stdout)).unwrap();
    }

    /// Runs the benchmark body exactly once in `Single` mode (used when
    /// benchmarks are executed as plain tests); timing data is discarded.
    pub fn run_once<F>(f: F)
    where
        F: FnMut(&mut Bencher),
    {
        let mut bs = Bencher {
            mode: BenchMode::Single,
            summary: None,
            bytes: 0,
        };
        bs.bench(f);
    }
}
1750
// Self-tests for the harness: exercise filtering, ignore handling,
// should_panic result calculation, flag parsing, sorting, metrics and
// the benchmark entry points.
#[cfg(test)]
mod tests {
    use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
               ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed,
               TrFailedMsg, TrIgnored, TrOk};
    use std::sync::mpsc::channel;
    use bench;
    use Bencher;
    use Concurrent;

    // Helper fixture: one ignored test named "1" and one unignored test
    // named "2".
    fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
        vec![
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_panic: ShouldPanic::No,
                    allow_fail: false,
                },
                testfn: DynTestFn(Box::new(move || {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_panic: ShouldPanic::No,
                    allow_fail: false,
                },
                testfn: DynTestFn(Box::new(move || {})),
            },
        ]
    }

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() {
            panic!();
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        // The panicking body must never have executed.
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() {}
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_panic() {
        fn f() {
            panic!();
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_good_message() {
        fn f() {
            panic!("an error message");
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::YesWithMessage("error message"),
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_bad_message() {
        fn f() {
            panic!("an error message");
        }
        let expected = "foobar";
        let failed_msg = "Panic did not include expected string";
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::YesWithMessage(expected),
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
    }

    #[test]
    fn test_should_panic_but_succeeds() {
        fn f() {}
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec![
            "progname".to_string(),
            "filter".to_string(),
            "--ignored".to_string(),
        ];
        let opts = parse_opts(&args).unwrap().unwrap();
        assert_eq!(opts.run_ignored, RunIgnored::Only);
    }

    #[test]
    fn parse_include_ignored_flag() {
        let args = vec![
            "progname".to_string(),
            "filter".to_string(),
            "-Zunstable-options".to_string(),
            "--include-ignored".to_string(),
        ];
        let opts = parse_opts(&args).unwrap().unwrap();
        assert_eq!(opts.run_ignored, RunIgnored::Yes);
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = RunIgnored::Only;

        let tests = one_ignored_one_unignored_test();
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(), "1");
        assert!(!filtered[0].desc.ignore);
    }

    #[test]
    pub fn run_include_ignored_option() {
        // When we "--include-ignored" tests, the ignore flag should be set to false on
        // all tests and no test filtered out

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = RunIgnored::Yes;

        let tests = one_ignored_one_unignored_test();
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 2);
        assert!(!filtered[0].desc.ignore);
        assert!(!filtered[1].desc.ignore);
    }

    #[test]
    pub fn exact_filter_match() {
        fn tests() -> Vec<TestDescAndFn> {
            vec!["base", "base::test", "base::test1", "base::test2"]
                .into_iter()
                .map(|name| TestDescAndFn {
                    desc: TestDesc {
                        name: StaticTestName(name),
                        ignore: false,
                        should_panic: ShouldPanic::No,
                        allow_fail: false,
                    },
                    testfn: DynTestFn(Box::new(move || {})),
                })
                .collect()
        }

        // Substring matching: every name containing the filter survives.
        let substr = filter_tests(
            &TestOpts {
                filter: Some("base".into()),
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(substr.len(), 4);

        let substr = filter_tests(
            &TestOpts {
                filter: Some("bas".into()),
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(substr.len(), 4);

        let substr = filter_tests(
            &TestOpts {
                filter: Some("::test".into()),
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(substr.len(), 3);

        let substr = filter_tests(
            &TestOpts {
                filter: Some("base::test".into()),
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(substr.len(), 3);

        // Exact matching: only a name identical to the filter survives.
        let exact = filter_tests(
            &TestOpts {
                filter: Some("base".into()),
                filter_exact: true,
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(exact.len(), 1);

        let exact = filter_tests(
            &TestOpts {
                filter: Some("bas".into()),
                filter_exact: true,
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(exact.len(), 0);

        let exact = filter_tests(
            &TestOpts {
                filter: Some("::test".into()),
                filter_exact: true,
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(exact.len(), 0);

        let exact = filter_tests(
            &TestOpts {
                filter: Some("base::test".into()),
                filter_exact: true,
                ..TestOpts::new()
            },
            tests(),
        );
        assert_eq!(exact.len(), 1);
    }

    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names = vec![
            "sha1::test".to_string(),
            "isize::test_to_str".to_string(),
            "isize::test_pow".to_string(),
            "test::do_not_run_ignored_tests".to_string(),
            "test::ignored_tests_result_in_ignored".to_string(),
            "test::first_free_arg_should_be_a_filter".to_string(),
            "test::parse_ignored_flag".to_string(),
            "test::parse_include_ignored_flag".to_string(),
            "test::filter_for_ignored_option".to_string(),
            "test::run_include_ignored_option".to_string(),
            "test::sort_tests".to_string(),
        ];
        let tests = {
            fn testfn() {}
            let mut tests = Vec::new();
            for name in &names {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_panic: ShouldPanic::No,
                        allow_fail: false,
                    },
                    testfn: DynTestFn(Box::new(testfn)),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        // filter_tests must emit the tests in alphabetical order.
        let expected = vec![
            "isize::test_pow".to_string(),
            "isize::test_to_str".to_string(),
            "sha1::test".to_string(),
            "test::do_not_run_ignored_tests".to_string(),
            "test::filter_for_ignored_option".to_string(),
            "test::first_free_arg_should_be_a_filter".to_string(),
            "test::ignored_tests_result_in_ignored".to_string(),
            "test::parse_ignored_flag".to_string(),
            "test::parse_include_ignored_flag".to_string(),
            "test::run_include_ignored_option".to_string(),
            "test::sort_tests".to_string(),
        ];

        for (a, b) in expected.iter().zip(filtered) {
            assert!(*a == b.desc.name.to_string());
        }
    }

    // Smoke-test that inserting metrics of every polarity succeeds.
    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
    }

    #[test]
    pub fn test_bench_once_no_iter() {
        fn f(_: &mut Bencher) {}
        bench::run_once(f);
    }

    #[test]
    pub fn test_bench_once_iter() {
        fn f(b: &mut Bencher) {
            b.iter(|| {})
        }
        bench::run_once(f);
    }

    #[test]
    pub fn test_bench_no_iter() {
        fn f(_: &mut Bencher) {}

        let (tx, rx) = channel();

        let desc = TestDesc {
            name: StaticTestName("f"),
            ignore: false,
            should_panic: ShouldPanic::No,
            allow_fail: false,
        };

        ::bench::benchmark(desc, tx, true, f);
        rx.recv().unwrap();
    }

    #[test]
    pub fn test_bench_iter() {
        fn f(b: &mut Bencher) {
            b.iter(|| {})
        }

        let (tx, rx) = channel();

        let desc = TestDesc {
            name: StaticTestName("f"),
            ignore: false,
            should_panic: ShouldPanic::No,
            allow_fail: false,
        };

        ::bench::benchmark(desc, tx, true, f);
        rx.recv().unwrap();
    }
}