// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
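//!
//! For example (a minimal sketch; the function bodies are made up for
//! illustration):
//!
//! ```rust,ignore
//! #![feature(test)]
//! extern crate test;
//!
//! #[test]
//! fn it_works() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_add(b: &mut test::Bencher) {
//!     // `iter` times the closure; `black_box` keeps the work from
//!     // being optimized away.
//!     b.iter(|| test::black_box(2 + 2));
//! }
//! ```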

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

#![crate_name = "test"]
#![unstable(feature = "test", issue = "27812")]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
       html_root_url = "https://doc.rust-lang.org/nightly/",
       test(attr(deny(warnings))))]
#![deny(warnings)]

#![feature(asm)]
#![feature(libc)]
#![feature(set_stdio)]
#![feature(panic_unwind)]

extern crate getopts;
extern crate term;
extern crate libc;
extern crate panic_unwind;

pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;

use std::panic::{catch_unwind, AssertUnwindSafe};
use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::env;
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::io;
use std::iter::repeat;
use std::path::PathBuf;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Instant, Duration};

const TEST_WARN_TIMEOUT_S: u64 = 60;

// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
             TrFailedMsg, TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName,
             DynTestName, DynTestFn, run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn, ShouldPanic, Options};
}

pub mod stats;

// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String),
}
impl TestName {
    fn as_slice(&self) -> &str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s,
        }
    }
}
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}

#[derive(Clone, Copy, PartialEq, Eq)]
pub enum NamePadding {
    PadNone,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
        let mut name = String::from(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnRight => {
                name.push_str(&pad);
                name
            }
        }
    }
}

/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
    fn run(&self, harness: &mut Bencher);
}

pub trait FnBox<T>: Send + 'static {
    fn call_box(self: Box<Self>, t: T);
}

impl<T, F: FnOnce(T) + Send + 'static> FnBox<T> for F {
    fn call_box(self: Box<F>, t: T) {
        (*self)(t)
    }
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Box<FnBox<()>>),
    DynMetricFn(Box<for<'a> FnBox<&'a mut MetricMap>>),
    DynBenchFn(Box<TDynBenchFn + 'static>),
}

impl TestFn {
    fn padding(&self) -> NamePadding {
        match *self {
            StaticTestFn(..) => PadNone,
            StaticBenchFn(..) => PadOnRight,
            StaticMetricFn(..) => PadOnRight,
            DynTestFn(..) => PadNone,
            DynMetricFn(..) => PadOnRight,
            DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)",
        })
    }
}

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Clone)]
pub struct Bencher {
    mode: BenchMode,
    summary: Option<stats::Summary>,
    pub bytes: u64,
}

#[derive(Clone, PartialEq, Eq)]
pub enum BenchMode {
    Auto,
    Single,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
    No,
    Yes,
    YesWithMessage(&'static str),
}
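
// A sketch of how the variants above map onto the user-facing attribute
// (illustrative only; the mapping is performed by rustc's test harness):
//
//     #[test]                                    -> ShouldPanic::No
//     #[test] #[should_panic]                    -> ShouldPanic::Yes
//     #[test] #[should_panic(expected = "foo")]  -> ShouldPanic::YesWithMessage("foo")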

// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_panic: ShouldPanic,
    pub allow_fail: bool,
}

#[derive(Clone)]
pub struct TestPaths {
    pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
    pub base: PathBuf, // e.g., compile-test, auxiliary
    pub relative_dir: PathBuf, // e.g., foo/bar
}

#[derive(Debug)]
pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[derive(Clone, PartialEq, Debug, Copy)]
pub struct Metric {
    value: f64,
    noise: f64,
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {
            value: value,
            noise: noise,
        }
    }
}

#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}

/// In case we want to add other options as well, just add them to this struct.
#[derive(Copy, Clone, Debug)]
pub struct Options {
    display_output: bool,
}

impl Options {
    pub fn new() -> Options {
        Options {
            display_output: false,
        }
    }

    pub fn display_output(mut self, display_output: bool) -> Options {
        self.display_output = display_output;
        self
    }
}
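
// `Options` follows the builder pattern: each setter consumes `self` and
// returns the updated value, so configuration chains. A minimal sketch:
//
//     let opts = Options::new().display_output(true);
//     test_main(&args, tests, opts);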

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
    let mut opts = match parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => panic!("{:?}", msg),
        None => return,
    };
    opts.options = options;
    if opts.list {
        if let Err(e) = list_tests_console(&opts, tests) {
            panic!("io error when listing tests: {:?}", e);
        }
    } else {
        match run_tests_console(&opts, tests) {
            Ok(true) => {}
            Ok(false) => std::process::exit(101),
            Err(e) => panic!("io error when running tests: {:?}", e),
        }
    }
}

// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(tests: &[TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests = tests.iter()
        .map(|t| {
            match t.testfn {
                StaticTestFn(f) => {
                    TestDescAndFn {
                        testfn: StaticTestFn(f),
                        desc: t.desc.clone(),
                    }
                }
                StaticBenchFn(f) => {
                    TestDescAndFn {
                        testfn: StaticBenchFn(f),
                        desc: t.desc.clone(),
                    }
                }
                _ => panic!("non-static tests passed to test::test_main_static"),
            }
        })
        .collect();
    test_main(&args, owned_tests, Options::new())
}
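
// When a crate is compiled with `--test`, rustc synthesizes an entry point
// that collects every `#[test]`/`#[bench]` item and hands them to the
// function above. Roughly (a sketch; the generated identifiers are internal):
//
//     fn main() {
//         test::test_main_static(&[/* one TestDescAndFn per #[test] item */])
//     }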

#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

#[derive(Debug)]
pub struct TestOpts {
    pub list: bool,
    pub filter: Option<String>,
    pub filter_exact: bool,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub bench_benchmarks: bool,
    pub logfile: Option<PathBuf>,
    pub nocapture: bool,
    pub color: ColorConfig,
    pub quiet: bool,
    pub test_threads: Option<usize>,
    pub skip: Vec<String>,
    pub options: Options,
}

impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            list: false,
            filter: None,
            filter_exact: false,
            run_ignored: false,
            run_tests: false,
            bench_benchmarks: false,
            logfile: None,
            nocapture: false,
            color: AutoColor,
            quiet: false,
            test_threads: None,
            skip: vec![],
            options: Options::new(),
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> getopts::Options {
    let mut opts = getopts::Options::new();
    opts.optflag("", "ignored", "Run ignored tests")
        .optflag("", "test", "Run tests and not benchmarks")
        .optflag("", "bench", "Run benchmarks instead of tests")
        .optflag("", "list", "List all tests and benchmarks")
        .optflag("h", "help", "Display this message (longer with --help)")
        .optopt("", "logfile", "Write logs to the specified file instead \
                                of stdout", "PATH")
        .optflag("", "nocapture", "don't capture stdout/stderr of each \
                                   task, allow printing directly")
        .optopt("", "test-threads", "Number of threads used for running tests \
                                     in parallel", "n_threads")
        .optmulti("", "skip", "Skip tests whose names contain FILTER (this flag can \
                               be used multiple times)", "FILTER")
        .optflag("q", "quiet", "Display one character per test instead of one line")
        .optflag("", "exact", "Exactly match filters rather than by substring")
        .optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never");
    opts
}

fn usage(binary: &str, options: &getopts::Options) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.

By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or by setting the
RUST_TEST_NOCAPTURE environment variable to a value other than "0".
Logging is not captured by default.

Test Attributes:

    #[test]         - Indicates a function is a test to be run. This function
                      takes no arguments.
    #[bench]        - Indicates a function is a benchmark to be run. This
                      function takes one argument (test::Bencher).
    #[should_panic] - This function (also labeled with #[test]) will only pass if
                      the code causes a panic (an assertion failure or panic!).
                      A message may be provided, which the failure string must
                      contain: #[should_panic(expected = "foo")].
    #[ignore]       - When applied to a function which is already attributed as a
                      test, then the test runner will ignore these tests during
                      normal test runs. Running with --ignored will run these
                      tests."#,
             usage = options.usage(&message));
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let opts = optgroups();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(f) => return Some(Err(f.to_string())),
    };

    if matches.opt_present("h") {
        usage(&args[0], &opts);
        return None;
    }

    let filter = if !matches.free.is_empty() {
        Some(matches.free[0].clone())
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");
    let quiet = matches.opt_present("quiet");
    let exact = matches.opt_present("exact");
    let list = matches.opt_present("list");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| PathBuf::from(&s));

    let bench_benchmarks = matches.opt_present("bench");
    let run_tests = !bench_benchmarks || matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
            Ok(val) => &val != "0",
            Err(_) => false,
        };
    }

    let test_threads = match matches.opt_str("test-threads") {
        Some(n_str) =>
            match n_str.parse::<usize>() {
                Ok(0) =>
                    return Some(Err(format!("argument for --test-threads must not be 0"))),
                Ok(n) => Some(n),
                Err(e) =>
                    return Some(Err(format!("argument for --test-threads must be a number > 0 \
                                             (error: {})", e))),
            },
        None => None,
    };

    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => {
            return Some(Err(format!("argument for --color must be auto, always, or never (was \
                                     {})",
                                    v)))
        }
    };

    let test_opts = TestOpts {
        list: list,
        filter: filter,
        filter_exact: exact,
        run_ignored: run_ignored,
        run_tests: run_tests,
        bench_benchmarks: bench_benchmarks,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
        quiet: quiet,
        test_threads: test_threads,
        skip: matches.opt_strs("skip"),
        options: Options::new(),
    };

    Some(Ok(test_opts))
}
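
// A sketch of the contract: `args[0]` is the binary name, so callers pass the
// full argv. `None` means "--help was printed"; `Some(Err(_))` is a parse error.
//
//     let args: Vec<String> = vec!["prog".into(), "--test-threads".into(), "4".into()];
//     match parse_opts(&args) {
//         Some(Ok(opts)) => assert_eq!(opts.test_threads, Some(4)),
//         Some(Err(msg)) => panic!("bad flags: {}", msg),
//         None => {} // help requested
//     }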

#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary,
    mb_s: usize,
}

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrFailedMsg(String),
    TrIgnored,
    TrAllowedFail,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}

enum OutputLocation<T> {
    Pretty(Box<term::StdoutTerminal>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    quiet: bool,
    total: usize,
    passed: usize,
    failed: usize,
    ignored: usize,
    allowed_fail: usize,
    filtered_out: usize,
    measured: usize,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    not_failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: usize, // number of columns to fill when aligning names
    options: Options,
}

impl<T: Write> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(File::create(path)?),
            None => None,
        };
        let out = match term::stdout() {
            None => Raw(io::stdout()),
            Some(t) => Pretty(t),
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            quiet: opts.quiet,
            total: 0,
            passed: 0,
            failed: 0,
            ignored: 0,
            allowed_fail: 0,
            filtered_out: 0,
            measured: 0,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            not_failures: Vec::new(),
            max_name_len: 0,
            options: opts.options,
        })
    }

    pub fn write_ok(&mut self) -> io::Result<()> {
        self.write_short_result("ok", ".", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::Result<()> {
        self.write_short_result("FAILED", "F", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::Result<()> {
        self.write_short_result("ignored", "i", term::color::YELLOW)
    }

    pub fn write_allowed_fail(&mut self) -> io::Result<()> {
        self.write_short_result("FAILED (allowed)", "a", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::Result<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::Result<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
                              -> io::Result<()> {
        if self.quiet {
            self.write_pretty(quiet, color)
        } else {
            self.write_pretty(verbose, color)?;
            self.write_plain("\n")
        }
    }

    pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    term.fg(color)?;
                }
                term.write_all(word.as_bytes())?;
                if self.use_color {
                    term.reset()?;
                }
                term.flush()
            }
            Raw(ref mut stdout) => {
                stdout.write_all(word.as_bytes())?;
                stdout.flush()
            }
        }
    }

    pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
        let s = s.as_ref();
        match self.out {
            Pretty(ref mut term) => {
                term.write_all(s.as_bytes())?;
                term.flush()
            }
            Raw(ref mut stdout) => {
                stdout.write_all(s.as_bytes())?;
                stdout.flush()
            }
        }
    }

    pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
        self.total = len;
        let noun = if len != 1 {
            "tests"
        } else {
            "test"
        };
        self.write_plain(&format!("\nrunning {} {}\n", len, noun))
    }

    pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
        if self.quiet && align != PadOnRight {
            Ok(())
        } else {
            let name = test.padded_name(self.max_name_len, align);
            self.write_plain(&format!("test {} ... ", name))
        }
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
        match *result {
            TrOk => self.write_ok(),
            TrFailed | TrFailedMsg(_) => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrAllowedFail => self.write_allowed_fail(),
            TrMetrics(ref mm) => {
                self.write_metric()?;
                self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
            }
            TrBench(ref bs) => {
                self.write_bench()?;
                self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
            }
        }
    }

    pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
        self.write_plain(&format!("test {} has been running for over {} seconds\n",
                                  desc.name,
                                  TEST_WARN_TIMEOUT_S))
    }

    pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
        let msg = msg.as_ref();
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => o.write_all(msg.as_bytes()),
        }
    }

    pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
        self.write_log(
            format!("{} {}\n",
                    match *result {
                        TrOk => "ok".to_owned(),
                        TrFailed => "failed".to_owned(),
                        TrFailedMsg(ref msg) => format!("failed: {}", msg),
                        TrIgnored => "ignored".to_owned(),
                        TrAllowedFail => "failed (allowed)".to_owned(),
                        TrMetrics(ref mm) => mm.fmt_metrics(),
                        TrBench(ref bs) => fmt_bench_samples(bs),
                    },
                    test.name))
    }

    pub fn write_failures(&mut self) -> io::Result<()> {
        self.write_plain("\nfailures:\n")?;
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in &self.failures {
            failures.push(f.name.to_string());
            if !stdout.is_empty() {
                fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
                let output = String::from_utf8_lossy(stdout);
                fail_out.push_str(&output);
                fail_out.push_str("\n");
            }
        }
        if !fail_out.is_empty() {
            self.write_plain("\n")?;
            self.write_plain(&fail_out)?;
        }

        self.write_plain("\nfailures:\n")?;
        failures.sort();
        for name in &failures {
            self.write_plain(&format!("    {}\n", name))?;
        }
        Ok(())
    }

    pub fn write_outputs(&mut self) -> io::Result<()> {
        self.write_plain("\nsuccesses:\n")?;
        let mut successes = Vec::new();
        let mut stdouts = String::new();
        for &(ref f, ref stdout) in &self.not_failures {
            successes.push(f.name.to_string());
            if !stdout.is_empty() {
                stdouts.push_str(&format!("---- {} stdout ----\n\t", f.name));
                let output = String::from_utf8_lossy(stdout);
                stdouts.push_str(&output);
                stdouts.push_str("\n");
            }
        }
        if !stdouts.is_empty() {
            self.write_plain("\n")?;
            self.write_plain(&stdouts)?;
        }

        self.write_plain("\nsuccesses:\n")?;
        successes.sort();
        for name in &successes {
            self.write_plain(&format!("    {}\n", name))?;
        }
        Ok(())
    }

    pub fn write_run_finish(&mut self) -> io::Result<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured +
                self.allowed_fail == self.total);

        if self.options.display_output {
            self.write_outputs()?;
        }
        let success = self.failed == 0;
        if !success {
            self.write_failures()?;
        }

        self.write_plain("\ntest result: ")?;
        if success {
            // There's no parallelism at this point so it's safe to use color
            self.write_pretty("ok", term::color::GREEN)?;
        } else {
            self.write_pretty("FAILED", term::color::RED)?;
        }
        let s = if self.allowed_fail > 0 {
            format!(
                ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n",
                self.passed,
                self.failed + self.allowed_fail,
                self.allowed_fail,
                self.ignored,
                self.measured,
                self.filtered_out)
        } else {
            format!(
                ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
                self.passed,
                self.failed,
                self.ignored,
                self.measured,
                self.filtered_out)
        };
        self.write_plain(&s)?;
        return Ok(success);
    }
}

// Format a number with thousands separators
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}
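
// Worked example: fmt_thousands_sep(1234567, ',') walks bases 10^9, 10^6,
// 10^3, 10^0. The 10^9 group is empty and skipped; "1" is written without
// padding, then "234" and "567" are zero-padded to three digits, giving
// "1,234,567". Note the [9, 6, 3, 0] table only separates the lowest four
// three-digit groups.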

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    use std::fmt::Write;
    let mut output = String::new();

    let median = bs.ns_iter_summ.median as usize;
    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;

    output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
                                  fmt_thousands_sep(median, ','),
                                  fmt_thousands_sep(deviation, ',')))
          .unwrap();
    if bs.mb_s != 0 {
        output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
    }
    output
}

// List the tests to console, and optionally to logfile. Filters are honored.
pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
    let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;

    let mut ntest = 0;
    let mut nbench = 0;
    let mut nmetric = 0;

    for test in filter_tests(&opts, tests) {
        use TestFn::*;

        let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;

        let fntype = match testfn {
            StaticTestFn(..) | DynTestFn(..) => { ntest += 1; "test" },
            StaticBenchFn(..) | DynBenchFn(..) => { nbench += 1; "benchmark" },
            StaticMetricFn(..) | DynMetricFn(..) => { nmetric += 1; "metric" },
        };

        st.write_plain(format!("{}: {}\n", name, fntype))?;
        st.write_log(format!("{} {}\n", fntype, name))?;
    }

    fn plural(count: u32, s: &str) -> String {
        match count {
            1 => format!("{} {}", 1, s),
            n => format!("{} {}s", n, s),
        }
    }

    if !opts.quiet {
        if ntest != 0 || nbench != 0 || nmetric != 0 {
            st.write_plain("\n")?;
        }
        st.write_plain(format!("{}, {}, {}\n",
                               plural(ntest, "test"),
                               plural(nbench, "benchmark"),
                               plural(nmetric, "metric")))?;
    }

    Ok(())
}

// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {

    fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeTimeout(ref test) => st.write_timeout(test),
            TeResult(test, result, stdout) => {
                st.write_log_result(&test, &result)?;
                st.write_result(&result)?;
                match result {
                    TrOk => {
                        st.passed += 1;
                        st.not_failures.push((test, stdout));
                    }
                    TrIgnored => st.ignored += 1,
                    TrAllowedFail => st.allowed_fail += 1,
                    TrMetrics(mm) => {
                        let tname = test.name;
                        let MetricMap(mm) = mm;
                        for (k, v) in &mm {
                            st.metrics
                              .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                    TrFailedMsg(msg) => {
                        st.failed += 1;
                        let mut stdout = stdout;
                        stdout.extend_from_slice(
                            format!("note: {}", msg).as_bytes()
                        );
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
    fn len_if_padded(t: &TestDescAndFn) -> usize {
        match t.testfn.padding() {
            PadNone => 0,
            PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
        let n = t.desc.name.as_slice();
        st.max_name_len = n.len();
    }
    run_tests(opts, tests, |x| callback(&x, &mut st))?;
    return st.write_run_finish();
}

#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_panic: ShouldPanic::No,
        allow_fail: false,
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_panic: ShouldPanic::No,
        allow_fail: false,
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        quiet: false,
        total: 0,
        passed: 0,
        failed: 0,
        ignored: 0,
        allowed_fail: 0,
        filtered_out: 0,
        measured: 0,
        max_name_len: 10,
        metrics: MetricMap::new(),
        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
        options: Options::new(),
        not_failures: Vec::new(),
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[..]),
        Pretty(_) => unreachable!(),
    };

    let apos = s.find("a").unwrap();
    let bpos = s.find("b").unwrap();
    assert!(apos < bpos);
}

fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => !opts.nocapture && stdout_isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[cfg(target_os = "redox")]
fn stdout_isatty() -> bool {
    // FIXME: Implement isatty on Redox
    false
}
#[cfg(unix)]
fn stdout_isatty() -> bool {
    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
fn stdout_isatty() -> bool {
    type DWORD = u32;
    type BOOL = i32;
    type HANDLE = *mut u8;
    type LPDWORD = *mut u32;
    const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
    extern "system" {
        fn GetStdHandle(which: DWORD) -> HANDLE;
        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
    }
    unsafe {
        let handle = GetStdHandle(STD_OUTPUT_HANDLE);
        let mut out = 0;
        GetConsoleMode(handle, &mut out) != 0
    }
}

#[derive(Clone)]
pub enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
    TeTimeout(TestDesc),
    TeFilteredOut(usize),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);


pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
    where F: FnMut(TestEvent) -> io::Result<()>
{
    use std::collections::HashMap;
    use std::sync::mpsc::RecvTimeoutError;

    let tests_len = tests.len();

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    let filtered_out = tests_len - filtered_tests.len();
    callback(TeFilteredOut(filtered_out))?;

    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    callback(TeFiltered(filtered_descs))?;

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false,
            }
        });

    let concurrency = match opts.test_threads {
        Some(n) => n,
        None => get_concurrency(),
    };

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();

    fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
        let now = Instant::now();
        let timed_out = running_tests.iter()
            .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone()) } else { None })
            .collect();
        for test in &timed_out {
            running_tests.remove(test);
        }
        timed_out
    }

    fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
        running_tests.values().min().map(|next_timeout| {
            let now = Instant::now();
            if *next_timeout >= now {
                *next_timeout - now
            } else {
                Duration::new(0, 0)
            }
        })
    }

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
            }
            let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
            running_tests.insert(test.desc.clone(), timeout);
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let mut res;
        loop {
            if let Some(timeout) = calc_timeout(&running_tests) {
                res = rx.recv_timeout(timeout);
                for test in get_timed_out_tests(&mut running_tests) {
                    callback(TeTimeout(test))?;
                }
                if res != Err(RecvTimeoutError::Timeout) {
                    break;
                }
            } else {
                res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                break;
            }
        }

        let (desc, result, stdout) = res.unwrap();
        running_tests.remove(&desc);

        if concurrency != 1 {
            callback(TeWait(desc.clone(), PadNone))?;
        }
        callback(TeResult(desc, result, stdout))?;
        pending -= 1;
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        // (this includes metric fns)
        for b in filtered_benchs_and_metrics {
            callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
            run_test(opts, false, b, tx.clone());
            let (test, result, stdout) = rx.recv().unwrap();
            callback(TeResult(test, result, stdout))?;
        }
    }
    Ok(())
}

#[allow(deprecated)]
fn get_concurrency() -> usize {
    return match env::var("RUST_TEST_THREADS") {
        Ok(s) => {
            let opt_n: Option<usize> = s.parse().ok();
            match opt_n {
                Some(n) if n > 0 => n,
                _ => {
                    panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
                           s)
                }
            }
        }
        Err(..) => num_cpus(),
    };

    #[cfg(windows)]
    #[allow(bad_style)]
    fn num_cpus() -> usize {
        #[repr(C)]
        struct SYSTEM_INFO {
            wProcessorArchitecture: u16,
            wReserved: u16,
            dwPageSize: u32,
            lpMinimumApplicationAddress: *mut u8,
            lpMaximumApplicationAddress: *mut u8,
            dwActiveProcessorMask: *mut u8,
            dwNumberOfProcessors: u32,
            dwProcessorType: u32,
            dwAllocationGranularity: u32,
            wProcessorLevel: u16,
            wProcessorRevision: u16,
        }
        extern "system" {
            fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
        }
        unsafe {
            let mut sysinfo = std::mem::zeroed();
            GetSystemInfo(&mut sysinfo);
            sysinfo.dwNumberOfProcessors as usize
        }
    }

    #[cfg(target_os = "redox")]
    fn num_cpus() -> usize {
        // FIXME: Implement num_cpus on Redox
        1
    }

    #[cfg(any(target_os = "linux",
              target_os = "macos",
              target_os = "ios",
              target_os = "android",
              target_os = "solaris",
              target_os = "emscripten",
              target_os = "fuchsia"))]
    fn num_cpus() -> usize {
        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
    }

    #[cfg(any(target_os = "freebsd",
              target_os = "dragonfly",
              target_os = "bitrig",
              target_os = "netbsd"))]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);

        unsafe {
            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
        }
        if cpus < 1 {
            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
            unsafe {
                libc::sysctl(mib.as_mut_ptr(),
                             2,
                             &mut cpus as *mut _ as *mut _,
                             &mut cpus_size as *mut _ as *mut _,
                             ptr::null_mut(),
                             0);
            }
            if cpus < 1 {
                cpus = 1;
            }
        }
        cpus as usize
    }

    #[cfg(target_os = "openbsd")]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];

        unsafe {
            libc::sysctl(mib.as_mut_ptr(),
                         2,
                         &mut cpus as *mut _ as *mut _,
                         &mut cpus_size as *mut _ as *mut _,
                         ptr::null_mut(),
                         0);
        }
        if cpus < 1 {
            cpus = 1;
        }
        cpus as usize
    }

    #[cfg(target_os = "haiku")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }
}

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref filter) => {
            filtered.into_iter()
                    .filter(|test| {
                        if opts.filter_exact {
                            test.desc.name.as_slice() == &filter[..]
                        } else {
                            test.desc.name.as_slice().contains(&filter[..])
                        }
                    })
                    .collect()
        }
    };

    // Skip tests that match any of the skip filters
    filtered = filtered.into_iter()
        .filter(|t| !opts.skip.iter().any(|sf| {
            if opts.filter_exact {
                t.desc.name.as_slice() == &sf[..]
            } else {
                t.desc.name.as_slice().contains(&sf[..])
            }
        }))
        .collect();

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn { desc, testfn } = test;
                Some(TestDescAndFn {
                    desc: TestDesc { ignore: false, ..desc },
                    testfn: testfn,
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(filter).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}
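
// A sketch of the combined semantics, using hypothetical test names:
// given tests ["base", "base::one", "other"] with filter = Some("base"),
// skip = ["one"], and filter_exact = false, the substring filter keeps
// "base" and "base::one", the skip pass then drops "base::one", and the
// result is ["base"], sorted by name.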

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    tests.into_iter().map(|x| {
        let testfn = match x.testfn {
            DynBenchFn(bench) => {
                DynTestFn(Box::new(move |()| {
                    bench::run_once(|b| {
                        __rust_begin_short_backtrace(|| bench.run(b))
                    })
                }))
            }
            StaticBenchFn(benchfn) => {
                DynTestFn(Box::new(move |()| {
                    bench::run_once(|b| {
                        __rust_begin_short_backtrace(|| benchfn(b))
                    })
                }))
            }
            f => f,
        };
        TestDescAndFn {
            desc: x.desc,
            testfn: testfn,
        }
    }).collect()
}

pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn { desc, testfn } = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Box<FnBox<()>>) {
        struct Sink(Arc<Mutex<Vec<u8>>>);
        impl Write for Sink {
            fn write(&mut self, data: &[u8]) -> io::Result<usize> {
                Write::write(&mut *self.0.lock().unwrap(), data)
            }
            fn flush(&mut self) -> io::Result<()> {
                Ok(())
            }
        }

        // Buffer for capturing standard I/O
        let data = Arc::new(Mutex::new(Vec::new()));
        let data2 = data.clone();

        let name = desc.name.clone();
        let runtest = move || {
            let oldio = if !nocapture {
                Some((
                    io::set_print(Some(Box::new(Sink(data2.clone())))),
                    io::set_panic(Some(Box::new(Sink(data2))))
                ))
            } else {
                None
            };

            let result = catch_unwind(AssertUnwindSafe(|| {
                testfn.call_box(())
            }));

            if let Some((printio, panicio)) = oldio {
                io::set_print(printio);
                io::set_panic(panicio);
            };

            let test_result = calc_result(&desc, result);
            let stdout = data.lock().unwrap().to_vec();
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        };


        // If the platform is single-threaded we're just going to run
        // the test synchronously, regardless of the concurrency
        // level.
        let supports_threads = !cfg!(target_os = "emscripten");
        if supports_threads {
            let cfg = thread::Builder::new().name(match name {
                DynTestName(ref name) => name.clone(),
                StaticTestName(name) => name.to_owned(),
            });
            cfg.spawn(runtest).unwrap();
        } else {
            runtest();
        }
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.call_box(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => {
            let cb = move |()| {
                __rust_begin_short_backtrace(|| f.call_box(()))
            };
            run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
        }
        StaticTestFn(f) =>
            run_test_inner(desc, monitor_ch, opts.nocapture,
                           Box::new(move |()| __rust_begin_short_backtrace(f))),
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
    f()
}

fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
    match (&desc.should_panic, task_result) {
        (&ShouldPanic::No, Ok(())) |
        (&ShouldPanic::Yes, Err(_)) => TrOk,
        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) =>
            if err.downcast_ref::<String>()
                  .map(|e| &**e)
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) {
                TrOk
            } else {
                if desc.allow_fail {
                    TrAllowedFail
                } else {
                    TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
                }
            },
        _ if desc.allow_fail => TrAllowedFail,
        _ => TrFailed,
    }
}
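
// The decision table, spelled out (an illustrative summary of the match
// above, not additional behavior):
//
//     should_panic          test body panicked?   result
//     -------------------   -------------------   ------------------------------
//     No                    no                    TrOk
//     Yes                   yes                   TrOk
//     YesWithMessage(m)     yes, contains m       TrOk
//     YesWithMessage(m)     yes, missing m        TrFailedMsg (or TrAllowedFail)
//     anything else                               TrFailed (or TrAllowedFail)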

impl MetricMap {
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
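    ///
    /// A minimal sketch (the metric names here are made up for illustration):
    ///
    /// ```rust,ignore
    /// let mut mm = MetricMap::new();
    /// mm.insert_metric("throughput", 1000.0, -10.0); // larger is better
    /// mm.insert_metric("latency-ns", 250.0, 5.0);    // smaller is better
    /// // Entries are stored in a BTreeMap, so fmt_metrics lists them by name:
    /// assert_eq!(mm.fmt_metrics(),
    ///            "latency-ns: 250 (+/- 5), throughput: 1000 (+/- -10)");
    /// ```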
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise,
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_owned(), m);
    }

    pub fn fmt_metrics(&self) -> String {
        let MetricMap(ref mm) = *self;
        let v: Vec<String> = mm.iter()
                               .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
                               .collect();
        v.join(", ")
    }
}


// Benchmarking

/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
#[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
              target_arch = "asmjs", target_arch = "wasm32")))]
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe { asm!("" : : "r"(&dummy)) }
    dummy
}
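
// Typical use inside a benchmark (a sketch): feed inputs through `black_box`
// so the optimizer can't constant-fold them, and pass the computed result to
// `black_box` so it can't be discarded as dead code.
//
//     b.iter(|| {
//         let n = black_box(1000);
//         black_box((0..n).fold(0, |a, b| a ^ b))
//     });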
#[cfg(any(all(target_os = "nacl", target_arch = "le32"),
          target_arch = "asmjs", target_arch = "wasm32"))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T {
    dummy
}


impl Bencher {
    /// Callback for benchmark functions to run in their body.
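    ///
    /// A minimal sketch: the harness times the closure and passes its return
    /// value through `black_box` so the work is not optimized away.
    ///
    /// ```rust,ignore
    /// #[bench]
    /// fn bench_sum(b: &mut Bencher) {
    ///     b.iter(|| (0..100u64).sum::<u64>());
    /// }
    /// ```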
    pub fn iter<T, F>(&mut self, mut inner: F)
        where F: FnMut() -> T
    {
        if self.mode == BenchMode::Single {
            ns_iter_inner(&mut inner, 1);
            return;
        }

        self.summary = Some(iter(&mut inner));
    }

    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
        where F: FnMut(&mut Bencher)
    {
        f(self);
        return self.summary;
    }
}

fn ns_from_dur(dur: Duration) -> u64 {
    dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
}

fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
    where F: FnMut() -> T
{
    let start = Instant::now();
    for _ in 0..k {
        black_box(inner());
    }
    return ns_from_dur(start.elapsed());
}


pub fn iter<T, F>(inner: &mut F) -> stats::Summary
    where F: FnMut() -> T
{
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e. larger error bars).
    n = cmp::max(1, n);

    let mut total_run = Duration::new(0, 0);
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    loop {
        let loop_start = Instant::now();

        for p in &mut *samples {
            *p = ns_iter_inner(inner, n) as f64 / n as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ = stats::Summary::new(samples);

        for p in &mut *samples {
            let ns = ns_iter_inner(inner, 5 * n);
            *p = ns as f64 / (5 * n) as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ5 = stats::Summary::new(samples);

        let loop_run = loop_start.elapsed();

        // If we've run for 100ms and seem to have converged to a
        // stable median.
        if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
           summ.median - summ5.median < summ5.median_abs_dev {
            return summ5;
        }

        total_run = total_run + loop_run;
        // Longest we ever run for is 3s.
        if total_run > Duration::from_secs(3) {
            return summ5;
        }

        // If we overflow here just return the results so far. We check a
        // multiplier of 10 because we're about to multiply by 2 and the
        // next iteration of the loop will also multiply by 5 (to calculate
        // the summ5 result)
        n = match n.checked_mul(10) {
            Some(_) => n * 2,
            None => {
                return summ5;
            }
        };
    }
}

pub mod bench {
    use std::cmp;
    use stats;
    use super::{Bencher, BenchSamples, BenchMode};

    pub fn benchmark<F>(f: F) -> BenchSamples
        where F: FnMut(&mut Bencher)
    {
        let mut bs = Bencher {
            mode: BenchMode::Auto,
            summary: None,
            bytes: 0,
        };

        return match bs.bench(f) {
            Some(ns_iter_summ) => {
                let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
                let mb_s = bs.bytes * 1000 / ns_iter;

                BenchSamples {
                    ns_iter_summ: ns_iter_summ,
                    mb_s: mb_s as usize,
                }
            }
            None => {
                // iter not called, so no data.
                // FIXME: error in this case?
                let samples: &mut [f64] = &mut [0.0_f64; 1];
                BenchSamples {
                    ns_iter_summ: stats::Summary::new(samples),
                    mb_s: 0,
                }
            }
        };
    }

    pub fn run_once<F>(f: F)
        where F: FnMut(&mut Bencher)
    {
        let mut bs = Bencher {
            mode: BenchMode::Single,
            summary: None,
            bytes: 0,
        };
        bs.bench(f);
    }
}

#[cfg(test)]
mod tests {
    use test::{TrFailed, TrFailedMsg, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc,
               TestDescAndFn, TestOpts, run_test, MetricMap, StaticTestName, DynTestName,
               DynTestFn, ShouldPanic};
    use std::sync::mpsc::channel;
    use bench;
    use Bencher;

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() {
            panic!();
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() {}
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_panic() {
        fn f() {
            panic!();
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_good_message() {
        fn f() {
            panic!("an error message");
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::YesWithMessage("error message"),
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_bad_message() {
        fn f() {
            panic!("an error message");
        }
        let expected = "foobar";
        let failed_msg = "Panic did not include expected string";
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::YesWithMessage(expected),
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
    }

    #[test]
    fn test_should_panic_but_succeeds() {
        fn f() {}
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes,
                allow_fail: false,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
        let opts = match parse_opts(&args) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag"),
        };
        assert!((opts.run_ignored));
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec![TestDescAndFn {
                             desc: TestDesc {
                                 name: StaticTestName("1"),
                                 ignore: true,
                                 should_panic: ShouldPanic::No,
                                 allow_fail: false,
                             },
                             testfn: DynTestFn(Box::new(move |()| {})),
                         },
                         TestDescAndFn {
                             desc: TestDesc {
                                 name: StaticTestName("2"),
                                 ignore: false,
                                 should_panic: ShouldPanic::No,
                                 allow_fail: false,
                             },
                             testfn: DynTestFn(Box::new(move |()| {})),
                         }];
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(), "1");
        assert!(!filtered[0].desc.ignore);
    }

    #[test]
    pub fn exact_filter_match() {
        fn tests() -> Vec<TestDescAndFn> {
            vec!["base",
                 "base::test",
                 "base::test1",
                 "base::test2",
            ].into_iter()
             .map(|name| TestDescAndFn {
                 desc: TestDesc {
                     name: StaticTestName(name),
                     ignore: false,
                     should_panic: ShouldPanic::No,
                     allow_fail: false,
                 },
                 testfn: DynTestFn(Box::new(move |()| {}))
             })
             .collect()
        }

        let substr = filter_tests(&TestOpts {
            filter: Some("base".into()),
            ..TestOpts::new()
        }, tests());
        assert_eq!(substr.len(), 4);

        let substr = filter_tests(&TestOpts {
            filter: Some("bas".into()),
            ..TestOpts::new()
        }, tests());
        assert_eq!(substr.len(), 4);

        let substr = filter_tests(&TestOpts {
            filter: Some("::test".into()),
            ..TestOpts::new()
        }, tests());
        assert_eq!(substr.len(), 3);

        let substr = filter_tests(&TestOpts {
            filter: Some("base::test".into()),
            ..TestOpts::new()
        }, tests());
        assert_eq!(substr.len(), 3);

        let exact = filter_tests(&TestOpts {
            filter: Some("base".into()),
            filter_exact: true, ..TestOpts::new()
        }, tests());
        assert_eq!(exact.len(), 1);

        let exact = filter_tests(&TestOpts {
            filter: Some("bas".into()),
            filter_exact: true,
            ..TestOpts::new()
        }, tests());
        assert_eq!(exact.len(), 0);

        let exact = filter_tests(&TestOpts {
            filter: Some("::test".into()),
            filter_exact: true,
            ..TestOpts::new()
        }, tests());
        assert_eq!(exact.len(), 0);

        let exact = filter_tests(&TestOpts {
            filter: Some("base::test".into()),
            filter_exact: true,
            ..TestOpts::new()
        }, tests());
        assert_eq!(exact.len(), 1);
    }

    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names = vec!["sha1::test".to_string(),
                         "isize::test_to_str".to_string(),
                         "isize::test_pow".to_string(),
                         "test::do_not_run_ignored_tests".to_string(),
                         "test::ignored_tests_result_in_ignored".to_string(),
                         "test::first_free_arg_should_be_a_filter".to_string(),
                         "test::parse_ignored_flag".to_string(),
                         "test::filter_for_ignored_option".to_string(),
                         "test::sort_tests".to_string()];
        let tests = {
            fn testfn() {}
            let mut tests = Vec::new();
            for name in &names {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_panic: ShouldPanic::No,
                        allow_fail: false,
                    },
                    testfn: DynTestFn(Box::new(move |()| testfn())),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected = vec!["isize::test_pow".to_string(),
                            "isize::test_to_str".to_string(),
                            "sha1::test".to_string(),
                            "test::do_not_run_ignored_tests".to_string(),
                            "test::filter_for_ignored_option".to_string(),
                            "test::first_free_arg_should_be_a_filter".to_string(),
                            "test::ignored_tests_result_in_ignored".to_string(),
                            "test::parse_ignored_flag".to_string(),
                            "test::sort_tests".to_string()];

        for (a, b) in expected.iter().zip(filtered) {
            assert!(*a == b.desc.name.to_string());
        }
    }

    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
    }

    #[test]
    pub fn test_bench_once_no_iter() {
        fn f(_: &mut Bencher) {}
        bench::run_once(f);
    }

    #[test]
    pub fn test_bench_once_iter() {
        fn f(b: &mut Bencher) {
            b.iter(|| {
            })
        }
        bench::run_once(f);
    }

    #[test]
    pub fn test_bench_no_iter() {
        fn f(_: &mut Bencher) {}
        bench::benchmark(f);
    }

    #[test]
    pub fn test_bench_iter() {
        fn f(b: &mut Bencher) {
            b.iter(|| {
            })
        }
        bench::benchmark(f);
    }
}