1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test", issue = "27812")]
30 #![cfg_attr(stage0, staged_api)]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url
= "https://doc.rust-lang.org/favicon.ico",
35 html_root_url
= "https://doc.rust-lang.org/nightly/",
36 test(attr(deny(warnings
))))]
39 #![feature(box_syntax)]
42 #![feature(rustc_private)]
43 #![feature(set_stdio)]
44 #![feature(staged_api)]
48 extern crate serialize
;
49 extern crate serialize
as rustc_serialize
;
53 pub use self::TestFn
::*;
54 pub use self::ColorConfig
::*;
55 pub use self::TestResult
::*;
56 pub use self::TestName
::*;
57 use self::TestEvent
::*;
58 use self::NamePadding
::*;
59 use self::OutputLocation
::*;
62 use getopts
::{OptGroup, optflag, optopt}
;
63 use serialize
::Encodable
;
64 use std
::boxed
::FnBox
;
66 use term
::color
::{Color, RED, YELLOW, GREEN, CYAN}
;
70 use std
::collections
::BTreeMap
;
74 use std
::io
::prelude
::*;
76 use std
::iter
::repeat
;
77 use std
::path
::PathBuf
;
78 use std
::sync
::mpsc
::{channel, Sender}
;
79 use std
::sync
::{Arc, Mutex}
;
81 use std
::time
::{Instant, Duration}
;
83 // to be used by rustc to compile tests in libtest
85 pub use {Bencher
, TestName
, TestResult
, TestDesc
,
86 TestDescAndFn
, TestOpts
, TrFailed
, TrIgnored
, TrOk
,
88 StaticTestFn
, StaticTestName
, DynTestName
, DynTestFn
,
89 run_test
, test_main
, test_main_static
, filter_tests
,
90 parse_opts
, StaticBenchFn
, ShouldPanic
};
95 // The name of a test. By convention this follows the rules for rust
96 // paths; i.e. it should be a series of identifiers separated by double
97 // colons. This way if some test runner wants to arrange the tests
98 // hierarchically it may.
100 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
102 StaticTestName(&'
static str),
106 fn as_slice(&self) -> &str {
108 StaticTestName(s
) => s
,
109 DynTestName(ref s
) => s
113 impl fmt
::Display
for TestName
{
114 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
115 fmt
::Display
::fmt(self.as_slice(), f
)
119 #[derive(Clone, Copy)]
126 fn padded_name(&self, column_count
: usize, align
: NamePadding
) -> String
{
127 let mut name
= String
::from(self.name
.as_slice());
128 let fill
= column_count
.saturating_sub(name
.len());
129 let pad
= repeat(" ").take(fill
).collect
::<String
>();
140 /// Represents a benchmark function.
141 pub trait TDynBenchFn
: Send
{
142 fn run(&self, harness
: &mut Bencher
);
145 // A function that runs a test. If the function returns successfully,
146 // the test succeeds; if the function panics then the test fails. We
147 // may need to come up with a more clever definition of test in order
148 // to support isolation of tests into threads.
151 StaticBenchFn(fn(&mut Bencher
)),
152 StaticMetricFn(fn(&mut MetricMap
)),
153 DynTestFn(Box
<FnBox() + Send
>),
154 DynMetricFn(Box
<FnBox(&mut MetricMap
)+Send
>),
155 DynBenchFn(Box
<TDynBenchFn
+'
static>)
159 fn padding(&self) -> NamePadding
{
161 StaticTestFn(..) => PadNone
,
162 StaticBenchFn(..) => PadOnRight
,
163 StaticMetricFn(..) => PadOnRight
,
164 DynTestFn(..) => PadNone
,
165 DynMetricFn(..) => PadOnRight
,
166 DynBenchFn(..) => PadOnRight
,
171 impl fmt
::Debug
for TestFn
{
172 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
173 f
.write_str(match *self {
174 StaticTestFn(..) => "StaticTestFn(..)",
175 StaticBenchFn(..) => "StaticBenchFn(..)",
176 StaticMetricFn(..) => "StaticMetricFn(..)",
177 DynTestFn(..) => "DynTestFn(..)",
178 DynMetricFn(..) => "DynMetricFn(..)",
179 DynBenchFn(..) => "DynBenchFn(..)"
184 /// Manager of the benchmarking runs.
186 /// This is fed into functions marked with `#[bench]` to allow for
187 /// set-up & tear-down before running a piece of code repeatedly via a
189 #[derive(Copy, Clone)]
196 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
197 pub enum ShouldPanic
{
200 YesWithMessage(&'
static str)
203 // The definition of a single test. A test runner will run a list of
205 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
206 pub struct TestDesc
{
209 pub should_panic
: ShouldPanic
,
212 unsafe impl Send
for TestDesc {}
215 pub struct TestDescAndFn
{
220 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
227 pub fn new(value
: f64, noise
: f64) -> Metric
{
228 Metric {value: value, noise: noise}
233 pub struct MetricMap(BTreeMap
<String
,Metric
>);
235 impl Clone
for MetricMap
{
236 fn clone(&self) -> MetricMap
{
237 let MetricMap(ref map
) = *self;
238 MetricMap(map
.clone())
242 // The default console test runner. It accepts the command line
243 // arguments and a vector of test_descs.
244 pub fn test_main(args
: &[String
], tests
: Vec
<TestDescAndFn
> ) {
246 match parse_opts(args
) {
248 Some(Err(msg
)) => panic
!("{:?}", msg
),
251 match run_tests_console(&opts
, tests
) {
253 Ok(false) => std
::process
::exit(101),
254 Err(e
) => panic
!("io error when running tests: {:?}", e
),
258 // A variant optimized for invocation with a static test vector.
259 // This will panic (intentionally) when fed any dynamic tests, because
260 // it is copying the static values out into a dynamic vector and cannot
261 // copy dynamic values. It is doing this because from this point on
262 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
263 // semantics into parallel test runners, which in turn requires a Vec<>
264 // rather than a &[].
265 pub fn test_main_static(tests
: &[TestDescAndFn
]) {
266 let args
= env
::args().collect
::<Vec
<_
>>();
267 let owned_tests
= tests
.iter().map(|t
| {
269 StaticTestFn(f
) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() }
,
270 StaticBenchFn(f
) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() }
,
271 _
=> panic
!("non-static tests passed to test::test_main_static")
274 test_main(&args
, owned_tests
)
277 #[derive(Copy, Clone)]
278 pub enum ColorConfig
{
284 pub struct TestOpts
{
285 pub filter
: Option
<String
>,
286 pub run_ignored
: bool
,
288 pub bench_benchmarks
: bool
,
289 pub logfile
: Option
<PathBuf
>,
291 pub color
: ColorConfig
,
296 fn new() -> TestOpts
{
301 bench_benchmarks
: false,
309 /// Result of parsing the options.
310 pub type OptRes
= Result
<TestOpts
, String
>;
312 fn optgroups() -> Vec
<getopts
::OptGroup
> {
313 vec
!(getopts
::optflag("", "ignored", "Run ignored tests"),
314 getopts
::optflag("", "test", "Run tests and not benchmarks"),
315 getopts
::optflag("", "bench", "Run benchmarks instead of tests"),
316 getopts
::optflag("h", "help", "Display this message (longer with --help)"),
317 getopts
::optopt("", "logfile", "Write logs to the specified file instead \
319 getopts
::optflag("", "nocapture", "don't capture stdout/stderr of each \
320 task, allow printing directly"),
321 getopts
::optopt("", "color", "Configure coloring of output:
322 auto = colorize if stdout is a tty and tests are run on serially (default);
323 always = always colorize output;
324 never = never colorize output;", "auto|always|never"))
327 fn usage(binary
: &str) {
328 let message
= format
!("Usage: {} [OPTIONS] [FILTER]", binary
);
331 The FILTER regex is tested against the name of all tests to run, and
332 only those tests that match are run.
334 By default, all tests are run in parallel. This can be altered with the
335 RUST_TEST_THREADS environment variable when running tests (set it to 1).
337 All tests have their standard output and standard error captured by default.
338 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
339 environment variable. Logging is not captured by default.
343 #[test] - Indicates a function is a test to be run. This function
345 #[bench] - Indicates a function is a benchmark to be run. This
346 function takes one argument (test::Bencher).
347 #[should_panic] - This function (also labeled with #[test]) will only pass if
348 the code causes a panic (an assertion failure or panic!)
349 A message may be provided, which the failure string must
350 contain: #[should_panic(expected = "foo")].
351 #[ignore] - When applied to a function which is already attributed as a
352 test, then the test runner will ignore these tests during
353 normal test runs. Running with --ignored will run these
355 usage
= getopts
::usage(&message
, &optgroups()));
358 // Parses command line arguments into test options
359 pub fn parse_opts(args
: &[String
]) -> Option
<OptRes
> {
360 let args_
= &args
[1..];
362 match getopts
::getopts(args_
, &optgroups()) {
364 Err(f
) => return Some(Err(f
.to_string()))
367 if matches
.opt_present("h") { usage(&args[0]); return None; }
369 let filter
= if !matches
.free
.is_empty() {
370 Some(matches
.free
[0].clone())
375 let run_ignored
= matches
.opt_present("ignored");
377 let logfile
= matches
.opt_str("logfile");
378 let logfile
= logfile
.map(|s
| PathBuf
::from(&s
));
380 let bench_benchmarks
= matches
.opt_present("bench");
381 let run_tests
= ! bench_benchmarks
||
382 matches
.opt_present("test");
384 let mut nocapture
= matches
.opt_present("nocapture");
386 nocapture
= env
::var("RUST_TEST_NOCAPTURE").is_ok();
389 let color
= match matches
.opt_str("color").as_ref().map(|s
| &**s
) {
390 Some("auto") | None
=> AutoColor
,
391 Some("always") => AlwaysColor
,
392 Some("never") => NeverColor
,
394 Some(v
) => return Some(Err(format
!("argument for --color must be \
395 auto, always, or never (was {})",
399 let test_opts
= TestOpts
{
401 run_ignored
: run_ignored
,
402 run_tests
: run_tests
,
403 bench_benchmarks
: bench_benchmarks
,
405 nocapture
: nocapture
,
412 #[derive(Clone, PartialEq)]
413 pub struct BenchSamples
{
414 ns_iter_summ
: stats
::Summary
,
418 #[derive(Clone, PartialEq)]
419 pub enum TestResult
{
423 TrMetrics(MetricMap
),
424 TrBench(BenchSamples
),
427 unsafe impl Send
for TestResult {}
429 enum OutputLocation
<T
> {
430 Pretty(Box
<term
::StdoutTerminal
>),
434 struct ConsoleTestState
<T
> {
435 log_out
: Option
<File
>,
436 out
: OutputLocation
<T
>,
444 failures
: Vec
<(TestDesc
, Vec
<u8> )> ,
445 max_name_len
: usize, // number of columns to fill when aligning names
448 impl<T
: Write
> ConsoleTestState
<T
> {
449 pub fn new(opts
: &TestOpts
,
450 _
: Option
<T
>) -> io
::Result
<ConsoleTestState
<io
::Stdout
>> {
451 let log_out
= match opts
.logfile
{
452 Some(ref path
) => Some(try
!(File
::create(path
))),
455 let out
= match term
::stdout() {
456 None
=> Raw(io
::stdout()),
460 Ok(ConsoleTestState
{
463 use_color
: use_color(opts
),
469 metrics
: MetricMap
::new(),
470 failures
: Vec
::new(),
475 pub fn write_ok(&mut self) -> io
::Result
<()> {
476 self.write_pretty("ok", term
::color
::GREEN
)
479 pub fn write_failed(&mut self) -> io
::Result
<()> {
480 self.write_pretty("FAILED", term
::color
::RED
)
483 pub fn write_ignored(&mut self) -> io
::Result
<()> {
484 self.write_pretty("ignored", term
::color
::YELLOW
)
487 pub fn write_metric(&mut self) -> io
::Result
<()> {
488 self.write_pretty("metric", term
::color
::CYAN
)
491 pub fn write_bench(&mut self) -> io
::Result
<()> {
492 self.write_pretty("bench", term
::color
::CYAN
)
495 pub fn write_pretty(&mut self,
497 color
: term
::color
::Color
) -> io
::Result
<()> {
499 Pretty(ref mut term
) => {
501 try
!(term
.fg(color
));
503 try
!(term
.write_all(word
.as_bytes()));
509 Raw(ref mut stdout
) => {
510 try
!(stdout
.write_all(word
.as_bytes()));
516 pub fn write_plain(&mut self, s
: &str) -> io
::Result
<()> {
518 Pretty(ref mut term
) => {
519 try
!(term
.write_all(s
.as_bytes()));
522 Raw(ref mut stdout
) => {
523 try
!(stdout
.write_all(s
.as_bytes()));
529 pub fn write_run_start(&mut self, len
: usize) -> io
::Result
<()> {
531 let noun
= if len
!= 1 { "tests" }
else { "test" }
;
532 self.write_plain(&format
!("\nrunning {} {}\n", len
, noun
))
535 pub fn write_test_start(&mut self, test
: &TestDesc
,
536 align
: NamePadding
) -> io
::Result
<()> {
537 let name
= test
.padded_name(self.max_name_len
, align
);
538 self.write_plain(&format
!("test {} ... ", name
))
541 pub fn write_result(&mut self, result
: &TestResult
) -> io
::Result
<()> {
543 TrOk
=> self.write_ok(),
544 TrFailed
=> self.write_failed(),
545 TrIgnored
=> self.write_ignored(),
546 TrMetrics(ref mm
) => {
547 try
!(self.write_metric());
548 self.write_plain(&format
!(": {}", mm
.fmt_metrics()))
551 try
!(self.write_bench());
553 try
!(self.write_plain(&format
!(": {}", fmt_bench_samples(bs
))));
558 self.write_plain("\n")
561 pub fn write_log(&mut self, test
: &TestDesc
,
562 result
: &TestResult
) -> io
::Result
<()> {
566 let s
= format
!("{} {}\n", match *result
{
567 TrOk
=> "ok".to_owned(),
568 TrFailed
=> "failed".to_owned(),
569 TrIgnored
=> "ignored".to_owned(),
570 TrMetrics(ref mm
) => mm
.fmt_metrics(),
571 TrBench(ref bs
) => fmt_bench_samples(bs
)
573 o
.write_all(s
.as_bytes())
578 pub fn write_failures(&mut self) -> io
::Result
<()> {
579 try
!(self.write_plain("\nfailures:\n"));
580 let mut failures
= Vec
::new();
581 let mut fail_out
= String
::new();
582 for &(ref f
, ref stdout
) in &self.failures
{
583 failures
.push(f
.name
.to_string());
584 if !stdout
.is_empty() {
585 fail_out
.push_str(&format
!("---- {} stdout ----\n\t", f
.name
));
586 let output
= String
::from_utf8_lossy(stdout
);
587 fail_out
.push_str(&output
);
588 fail_out
.push_str("\n");
591 if !fail_out
.is_empty() {
592 try
!(self.write_plain("\n"));
593 try
!(self.write_plain(&fail_out
));
596 try
!(self.write_plain("\nfailures:\n"));
598 for name
in &failures
{
599 try
!(self.write_plain(&format
!(" {}\n", name
)));
604 pub fn write_run_finish(&mut self) -> io
::Result
<bool
> {
605 assert
!(self.passed
+ self.failed
+ self.ignored
+ self.measured
== self.total
);
607 let success
= self.failed
== 0;
609 try
!(self.write_failures());
612 try
!(self.write_plain("\ntest result: "));
614 // There's no parallelism at this point so it's safe to use color
615 try
!(self.write_ok());
617 try
!(self.write_failed());
619 let s
= format
!(". {} passed; {} failed; {} ignored; {} measured\n\n",
620 self.passed
, self.failed
, self.ignored
, self.measured
);
621 try
!(self.write_plain(&s
));
626 // Format a number with thousands separators
627 fn fmt_thousands_sep(mut n
: usize, sep
: char) -> String
{
629 let mut output
= String
::new();
630 let mut trailing
= false;
631 for &pow
in &[9, 6, 3, 0] {
632 let base
= 10_usize
.pow(pow
);
633 if pow
== 0 || trailing
|| n
/ base
!= 0 {
635 output
.write_fmt(format_args
!("{}", n
/ base
)).unwrap();
637 output
.write_fmt(format_args
!("{:03}", n
/ base
)).unwrap();
650 pub fn fmt_bench_samples(bs
: &BenchSamples
) -> String
{
652 let mut output
= String
::new();
654 let median
= bs
.ns_iter_summ
.median
as usize;
655 let deviation
= (bs
.ns_iter_summ
.max
- bs
.ns_iter_summ
.min
) as usize;
657 output
.write_fmt(format_args
!("{:>11} ns/iter (+/- {})",
658 fmt_thousands_sep(median
, '
,'
),
659 fmt_thousands_sep(deviation
, '
,'
))).unwrap();
661 output
.write_fmt(format_args
!(" = {} MB/s", bs
.mb_s
)).unwrap();
666 // A simple console test runner
667 pub fn run_tests_console(opts
: &TestOpts
, tests
: Vec
<TestDescAndFn
> ) -> io
::Result
<bool
> {
669 fn callback
<T
: Write
>(event
: &TestEvent
,
670 st
: &mut ConsoleTestState
<T
>) -> io
::Result
<()> {
671 match (*event
).clone() {
672 TeFiltered(ref filtered_tests
) => st
.write_run_start(filtered_tests
.len()),
673 TeWait(ref test
, padding
) => st
.write_test_start(test
, padding
),
674 TeResult(test
, result
, stdout
) => {
675 try
!(st
.write_log(&test
, &result
));
676 try
!(st
.write_result(&result
));
678 TrOk
=> st
.passed
+= 1,
679 TrIgnored
=> st
.ignored
+= 1,
681 let tname
= test
.name
;
682 let MetricMap(mm
) = mm
;
685 .insert_metric(&format
!("{}.{}",
694 st
.metrics
.insert_metric(test
.name
.as_slice(),
695 bs
.ns_iter_summ
.median
,
696 bs
.ns_iter_summ
.max
- bs
.ns_iter_summ
.min
);
701 st
.failures
.push((test
, stdout
));
709 let mut st
= try
!(ConsoleTestState
::new(opts
, None
::<io
::Stdout
>));
710 fn len_if_padded(t
: &TestDescAndFn
) -> usize {
711 match t
.testfn
.padding() {
713 PadOnRight
=> t
.desc
.name
.as_slice().len(),
716 match tests
.iter().max_by_key(|t
|len_if_padded(*t
)) {
718 let n
= t
.desc
.name
.as_slice();
719 st
.max_name_len
= n
.len();
723 try
!(run_tests(opts
, tests
, |x
| callback(&x
, &mut st
)));
724 return st
.write_run_finish();
728 fn should_sort_failures_before_printing_them() {
729 let test_a
= TestDesc
{
730 name
: StaticTestName("a"),
732 should_panic
: ShouldPanic
::No
735 let test_b
= TestDesc
{
736 name
: StaticTestName("b"),
738 should_panic
: ShouldPanic
::No
741 let mut st
= ConsoleTestState
{
743 out
: Raw(Vec
::new()),
751 metrics
: MetricMap
::new(),
752 failures
: vec
!((test_b
, Vec
::new()), (test_a
, Vec
::new()))
755 st
.write_failures().unwrap();
756 let s
= match st
.out
{
757 Raw(ref m
) => String
::from_utf8_lossy(&m
[..]),
758 Pretty(_
) => unreachable
!()
761 let apos
= s
.find("a").unwrap();
762 let bpos
= s
.find("b").unwrap();
763 assert
!(apos
< bpos
);
766 fn use_color(opts
: &TestOpts
) -> bool
{
768 AutoColor
=> !opts
.nocapture
&& stdout_isatty(),
775 fn stdout_isatty() -> bool
{
776 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
779 fn stdout_isatty() -> bool
{
782 type HANDLE
= *mut u8;
783 type LPDWORD
= *mut u32;
784 const STD_OUTPUT_HANDLE
: DWORD
= -11i32 as DWORD
;
786 fn GetStdHandle(which
: DWORD
) -> HANDLE
;
787 fn GetConsoleMode(hConsoleHandle
: HANDLE
, lpMode
: LPDWORD
) -> BOOL
;
790 let handle
= GetStdHandle(STD_OUTPUT_HANDLE
);
792 GetConsoleMode(handle
, &mut out
) != 0
798 TeFiltered(Vec
<TestDesc
> ),
799 TeWait(TestDesc
, NamePadding
),
800 TeResult(TestDesc
, TestResult
, Vec
<u8> ),
803 pub type MonitorMsg
= (TestDesc
, TestResult
, Vec
<u8> );
806 fn run_tests
<F
>(opts
: &TestOpts
,
807 tests
: Vec
<TestDescAndFn
> ,
808 mut callback
: F
) -> io
::Result
<()> where
809 F
: FnMut(TestEvent
) -> io
::Result
<()>,
811 let mut filtered_tests
= filter_tests(opts
, tests
);
812 if !opts
.bench_benchmarks
{
813 filtered_tests
= convert_benchmarks_to_tests(filtered_tests
);
816 let filtered_descs
= filtered_tests
.iter()
817 .map(|t
| t
.desc
.clone())
820 try
!(callback(TeFiltered(filtered_descs
)));
822 let (filtered_tests
, filtered_benchs_and_metrics
): (Vec
<_
>, _
) =
823 filtered_tests
.into_iter().partition(|e
| {
825 StaticTestFn(_
) | DynTestFn(_
) => true,
830 // It's tempting to just spawn all the tests at once, but since we have
831 // many tests that run in other processes we would be making a big mess.
832 let concurrency
= get_concurrency();
834 let mut remaining
= filtered_tests
;
838 let (tx
, rx
) = channel
::<MonitorMsg
>();
840 while pending
> 0 || !remaining
.is_empty() {
841 while pending
< concurrency
&& !remaining
.is_empty() {
842 let test
= remaining
.pop().unwrap();
843 if concurrency
== 1 {
844 // We are doing one test at a time so we can print the name
845 // of the test before we run it. Useful for debugging tests
846 // that hang forever.
847 try
!(callback(TeWait(test
.desc
.clone(), test
.testfn
.padding())));
849 run_test(opts
, !opts
.run_tests
, test
, tx
.clone());
853 let (desc
, result
, stdout
) = rx
.recv().unwrap();
854 if concurrency
!= 1 {
855 try
!(callback(TeWait(desc
.clone(), PadNone
)));
857 try
!(callback(TeResult(desc
, result
, stdout
)));
861 if opts
.bench_benchmarks
{
862 // All benchmarks run at the end, in serial.
863 // (this includes metric fns)
864 for b
in filtered_benchs_and_metrics
{
865 try
!(callback(TeWait(b
.desc
.clone(), b
.testfn
.padding())));
866 run_test(opts
, false, b
, tx
.clone());
867 let (test
, result
, stdout
) = rx
.recv().unwrap();
868 try
!(callback(TeResult(test
, result
, stdout
)));
875 fn get_concurrency() -> usize {
876 return match env
::var("RUST_TEST_THREADS") {
878 let opt_n
: Option
<usize> = s
.parse().ok();
880 Some(n
) if n
> 0 => n
,
881 _
=> panic
!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s
)
884 Err(..) => num_cpus(),
889 fn num_cpus() -> usize {
892 wProcessorArchitecture
: u16,
895 lpMinimumApplicationAddress
: *mut u8,
896 lpMaximumApplicationAddress
: *mut u8,
897 dwActiveProcessorMask
: *mut u8,
898 dwNumberOfProcessors
: u32,
899 dwProcessorType
: u32,
900 dwAllocationGranularity
: u32,
901 wProcessorLevel
: u16,
902 wProcessorRevision
: u16,
905 fn GetSystemInfo(info
: *mut SYSTEM_INFO
) -> i32;
908 let mut sysinfo
= std
::mem
::zeroed();
909 GetSystemInfo(&mut sysinfo
);
910 sysinfo
.dwNumberOfProcessors
as usize
915 fn num_cpus() -> usize {
916 extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
917 unsafe { rust_get_num_cpus() as usize }
921 pub fn filter_tests(opts
: &TestOpts
, tests
: Vec
<TestDescAndFn
>) -> Vec
<TestDescAndFn
> {
922 let mut filtered
= tests
;
924 // Remove tests that don't match the test filter
925 filtered
= match opts
.filter
{
927 Some(ref filter
) => {
928 filtered
.into_iter().filter(|test
| {
929 test
.desc
.name
.as_slice().contains(&filter
[..])
934 // Maybe pull out the ignored test and unignore them
935 filtered
= if !opts
.run_ignored
{
938 fn filter(test
: TestDescAndFn
) -> Option
<TestDescAndFn
> {
939 if test
.desc
.ignore
{
940 let TestDescAndFn {desc, testfn}
= test
;
942 desc
: TestDesc {ignore: false, ..desc}
,
949 filtered
.into_iter().filter_map(filter
).collect()
952 // Sort the tests alphabetically
953 filtered
.sort_by(|t1
, t2
| t1
.desc
.name
.as_slice().cmp(t2
.desc
.name
.as_slice()));
958 pub fn convert_benchmarks_to_tests(tests
: Vec
<TestDescAndFn
>) -> Vec
<TestDescAndFn
> {
959 // convert benchmarks to tests, if we're not benchmarking them
960 tests
.into_iter().map(|x
| {
961 let testfn
= match x
.testfn
{
962 DynBenchFn(bench
) => {
963 DynTestFn(Box
::new(move || bench
::run_once(|b
| bench
.run(b
))))
965 StaticBenchFn(benchfn
) => {
966 DynTestFn(Box
::new(move || bench
::run_once(|b
| benchfn(b
))))
970 TestDescAndFn { desc: x.desc, testfn: testfn }
974 pub fn run_test(opts
: &TestOpts
,
977 monitor_ch
: Sender
<MonitorMsg
>) {
979 let TestDescAndFn {desc, testfn}
= test
;
981 if force_ignore
|| desc
.ignore
{
982 monitor_ch
.send((desc
, TrIgnored
, Vec
::new())).unwrap();
986 fn run_test_inner(desc
: TestDesc
,
987 monitor_ch
: Sender
<MonitorMsg
>,
989 testfn
: Box
<FnBox() + Send
>) {
990 struct Sink(Arc
<Mutex
<Vec
<u8>>>);
991 impl Write
for Sink
{
992 fn write(&mut self, data
: &[u8]) -> io
::Result
<usize> {
993 Write
::write(&mut *self.0.lock().unwrap(), data
)
995 fn flush(&mut self) -> io
::Result
<()> { Ok(()) }
998 thread
::spawn(move || {
999 let data
= Arc
::new(Mutex
::new(Vec
::new()));
1000 let data2
= data
.clone();
1001 let cfg
= thread
::Builder
::new().name(match desc
.name
{
1002 DynTestName(ref name
) => name
.clone(),
1003 StaticTestName(name
) => name
.to_owned(),
1006 let result_guard
= cfg
.spawn(move || {
1008 io
::set_print(box Sink(data2
.clone()));
1009 io
::set_panic(box Sink(data2
));
1013 let test_result
= calc_result(&desc
, result_guard
.join());
1014 let stdout
= data
.lock().unwrap().to_vec();
1015 monitor_ch
.send((desc
.clone(), test_result
, stdout
)).unwrap();
1020 DynBenchFn(bencher
) => {
1021 let bs
= ::bench
::benchmark(|harness
| bencher
.run(harness
));
1022 monitor_ch
.send((desc
, TrBench(bs
), Vec
::new())).unwrap();
1025 StaticBenchFn(benchfn
) => {
1026 let bs
= ::bench
::benchmark(|harness
| (benchfn
.clone())(harness
));
1027 monitor_ch
.send((desc
, TrBench(bs
), Vec
::new())).unwrap();
1031 let mut mm
= MetricMap
::new();
1032 f
.call_box((&mut mm
,));
1033 monitor_ch
.send((desc
, TrMetrics(mm
), Vec
::new())).unwrap();
1036 StaticMetricFn(f
) => {
1037 let mut mm
= MetricMap
::new();
1039 monitor_ch
.send((desc
, TrMetrics(mm
), Vec
::new())).unwrap();
1042 DynTestFn(f
) => run_test_inner(desc
, monitor_ch
, opts
.nocapture
, f
),
1043 StaticTestFn(f
) => run_test_inner(desc
, monitor_ch
, opts
.nocapture
,
1048 fn calc_result(desc
: &TestDesc
, task_result
: Result
<(), Box
<Any
+Send
>>) -> TestResult
{
1049 match (&desc
.should_panic
, task_result
) {
1050 (&ShouldPanic
::No
, Ok(())) |
1051 (&ShouldPanic
::Yes
, Err(_
)) => TrOk
,
1052 (&ShouldPanic
::YesWithMessage(msg
), Err(ref err
))
1053 if err
.downcast_ref
::<String
>()
1055 .or_else(|| err
.downcast_ref
::<&'
static str>().map(|e
| *e
))
1056 .map(|e
| e
.contains(msg
))
1057 .unwrap_or(false) => TrOk
,
1064 pub fn new() -> MetricMap
{
1065 MetricMap(BTreeMap
::new())
1068 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1069 /// must be non-negative. The `noise` indicates the uncertainty of the
1070 /// metric, which doubles as the "noise range" of acceptable
1071 /// pairwise-regressions on this named value, when comparing from one
1072 /// metric to the next using `compare_to_old`.
1074 /// If `noise` is positive, then it means this metric is of a value
1075 /// you want to see grow smaller, so a change larger than `noise` in the
1076 /// positive direction represents a regression.
1078 /// If `noise` is negative, then it means this metric is of a value
1079 /// you want to see grow larger, so a change larger than `noise` in the
1080 /// negative direction represents a regression.
1081 pub fn insert_metric(&mut self, name
: &str, value
: f64, noise
: f64) {
1086 let MetricMap(ref mut map
) = *self;
1087 map
.insert(name
.to_owned(), m
);
1090 pub fn fmt_metrics(&self) -> String
{
1091 let MetricMap(ref mm
) = *self;
1092 let v
: Vec
<String
> = mm
.iter()
1093 .map(|(k
,v
)| format
!("{}: {} (+/- {})", *k
,
1103 /// A function that is opaque to the optimizer, to allow benchmarks to
1104 /// pretend to use outputs to assist in avoiding dead-code
1107 /// This function is a no-op, and does not even read from `dummy`.
1108 #[cfg(not(all(target_os = "nacl", target_arch = "le32")))]
1109 pub fn black_box
<T
>(dummy
: T
) -> T
{
1110 // we need to "use" the argument in some way LLVM can't
1112 unsafe {asm!("" : : "r"(&dummy))}
// NaCl/PNaCl (le32) has no inline asm, so fall back to an identity
// function; `#[inline(never)]` (restored from a missing line — confirm
// upstream) keeps the call opaque enough to resist dead-code elimination.
#[cfg(all(target_os = "nacl", target_arch = "le32"))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T { dummy }
1121 /// Callback for benchmark functions to run in their body.
1122 pub fn iter
<T
, F
>(&mut self, mut inner
: F
) where F
: FnMut() -> T
{
1123 let start
= Instant
::now();
1124 let k
= self.iterations
;
1128 self.dur
= start
.elapsed();
1131 pub fn ns_elapsed(&mut self) -> u64 {
1132 self.dur
.as_secs() * 1_000_000_000 + (self.dur
.subsec_nanos() as u64)
1135 pub fn ns_per_iter(&mut self) -> u64 {
1136 if self.iterations
== 0 {
1139 self.ns_elapsed() / cmp
::max(self.iterations
, 1)
1143 pub fn bench_n
<F
>(&mut self, n
: u64, f
: F
) where F
: FnOnce(&mut Bencher
) {
1144 self.iterations
= n
;
1148 // This is a more statistics-driven benchmark algorithm
1149 pub fn auto_bench
<F
>(&mut self, mut f
: F
) -> stats
::Summary
where F
: FnMut(&mut Bencher
) {
1150 // Initial bench run to get ballpark figure.
1152 self.bench_n(n
, |x
| f(x
));
1154 // Try to estimate iter count for 1ms falling back to 1m
1155 // iterations if first run took < 1ns.
1156 if self.ns_per_iter() == 0 {
1159 n
= 1_000_000 / cmp
::max(self.ns_per_iter(), 1);
1161 // if the first run took more than 1ms we don't want to just
1162 // be left doing 0 iterations on every loop. The unfortunate
1163 // side effect of not being able to do as many runs is
1164 // automatically handled by the statistical analysis below
1165 // (i.e. larger error bars).
1166 if n
== 0 { n = 1; }
1168 let mut total_run
= Duration
::new(0, 0);
1169 let samples
: &mut [f64] = &mut [0.0_f64; 50];
1171 let loop_start
= Instant
::now();
1173 for p
in &mut *samples
{
1174 self.bench_n(n
, |x
| f(x
));
1175 *p
= self.ns_per_iter() as f64;
1178 stats
::winsorize(samples
, 5.0);
1179 let summ
= stats
::Summary
::new(samples
);
1181 for p
in &mut *samples
{
1182 self.bench_n(5 * n
, |x
| f(x
));
1183 *p
= self.ns_per_iter() as f64;
1186 stats
::winsorize(samples
, 5.0);
1187 let summ5
= stats
::Summary
::new(samples
);
1188 let loop_run
= loop_start
.elapsed();
1190 // If we've run for 100ms and seem to have converged to a
1192 if loop_run
> Duration
::from_millis(100) &&
1193 summ
.median_abs_dev_pct
< 1.0 &&
1194 summ
.median
- summ5
.median
< summ5
.median_abs_dev
{
1198 total_run
= total_run
+ loop_run
;
1199 // Longest we ever run for is 3s.
1200 if total_run
> Duration
::from_secs(3) {
1204 // If we overflow here just return the results so far. We check a
1205 // multiplier of 10 because we're about to multiply by 2 and the
1206 // next iteration of the loop will also multiply by 5 (to calculate
1207 // the summ5 result)
1208 n
= match n
.checked_mul(10) {
1210 None
=> return summ5
,
1218 use std
::time
::Duration
;
1219 use super::{Bencher, BenchSamples}
;
1221 pub fn benchmark
<F
>(f
: F
) -> BenchSamples
where F
: FnMut(&mut Bencher
) {
1222 let mut bs
= Bencher
{
1224 dur
: Duration
::new(0, 0),
1228 let ns_iter_summ
= bs
.auto_bench(f
);
1230 let ns_iter
= cmp
::max(ns_iter_summ
.median
as u64, 1);
1231 let iter_s
= 1_000_000_000 / ns_iter
;
1232 let mb_s
= (bs
.bytes
* iter_s
) / 1_000_000;
1235 ns_iter_summ
: ns_iter_summ
,
1240 pub fn run_once
<F
>(f
: F
) where F
: FnOnce(&mut Bencher
) {
1241 let mut bs
= Bencher
{
1243 dur
: Duration
::new(0, 0),
1252 use test
::{TrFailed
, TrIgnored
, TrOk
, filter_tests
, parse_opts
,
1253 TestDesc
, TestDescAndFn
, TestOpts
, run_test
,
1255 StaticTestName
, DynTestName
, DynTestFn
, ShouldPanic
};
1256 use std
::sync
::mpsc
::channel
;
1259 pub fn do_not_run_ignored_tests() {
1260 fn f() { panic!(); }
1261 let desc
= TestDescAndFn
{
1263 name
: StaticTestName("whatever"),
1265 should_panic
: ShouldPanic
::No
,
1267 testfn
: DynTestFn(Box
::new(move|| f())),
1269 let (tx
, rx
) = channel();
1270 run_test(&TestOpts
::new(), false, desc
, tx
);
1271 let (_
, res
, _
) = rx
.recv().unwrap();
1272 assert
!(res
!= TrOk
);
1276 pub fn ignored_tests_result_in_ignored() {
1278 let desc
= TestDescAndFn
{
1280 name
: StaticTestName("whatever"),
1282 should_panic
: ShouldPanic
::No
,
1284 testfn
: DynTestFn(Box
::new(move|| f())),
1286 let (tx
, rx
) = channel();
1287 run_test(&TestOpts
::new(), false, desc
, tx
);
1288 let (_
, res
, _
) = rx
.recv().unwrap();
1289 assert
!(res
== TrIgnored
);
1293 fn test_should_panic() {
1294 fn f() { panic!(); }
1295 let desc
= TestDescAndFn
{
1297 name
: StaticTestName("whatever"),
1299 should_panic
: ShouldPanic
::Yes
,
1301 testfn
: DynTestFn(Box
::new(move|| f())),
1303 let (tx
, rx
) = channel();
1304 run_test(&TestOpts
::new(), false, desc
, tx
);
1305 let (_
, res
, _
) = rx
.recv().unwrap();
1306 assert
!(res
== TrOk
);
1310 fn test_should_panic_good_message() {
1311 fn f() { panic!("an error message"); }
1312 let desc
= TestDescAndFn
{
1314 name
: StaticTestName("whatever"),
1316 should_panic
: ShouldPanic
::YesWithMessage("error message"),
1318 testfn
: DynTestFn(Box
::new(move|| f())),
1320 let (tx
, rx
) = channel();
1321 run_test(&TestOpts
::new(), false, desc
, tx
);
1322 let (_
, res
, _
) = rx
.recv().unwrap();
1323 assert
!(res
== TrOk
);
1327 fn test_should_panic_bad_message() {
1328 fn f() { panic!("an error message"); }
1329 let desc
= TestDescAndFn
{
1331 name
: StaticTestName("whatever"),
1333 should_panic
: ShouldPanic
::YesWithMessage("foobar"),
1335 testfn
: DynTestFn(Box
::new(move|| f())),
1337 let (tx
, rx
) = channel();
1338 run_test(&TestOpts
::new(), false, desc
, tx
);
1339 let (_
, res
, _
) = rx
.recv().unwrap();
1340 assert
!(res
== TrFailed
);
1344 fn test_should_panic_but_succeeds() {
1346 let desc
= TestDescAndFn
{
1348 name
: StaticTestName("whatever"),
1350 should_panic
: ShouldPanic
::Yes
,
1352 testfn
: DynTestFn(Box
::new(move|| f())),
1354 let (tx
, rx
) = channel();
1355 run_test(&TestOpts
::new(), false, desc
, tx
);
1356 let (_
, res
, _
) = rx
.recv().unwrap();
1357 assert
!(res
== TrFailed
);
1361 fn parse_ignored_flag() {
1362 let args
= vec
!("progname".to_string(),
1363 "filter".to_string(),
1364 "--ignored".to_string());
1365 let opts
= match parse_opts(&args
) {
1367 _
=> panic
!("Malformed arg in parse_ignored_flag")
1369 assert
!((opts
.run_ignored
));
1373 pub fn filter_for_ignored_option() {
1374 // When we run ignored tests the test filter should filter out all the
1375 // unignored tests and flip the ignore flag on the rest to false
1377 let mut opts
= TestOpts
::new();
1378 opts
.run_tests
= true;
1379 opts
.run_ignored
= true;
1384 name
: StaticTestName("1"),
1386 should_panic
: ShouldPanic
::No
,
1388 testfn
: DynTestFn(Box
::new(move|| {}
)),
1392 name
: StaticTestName("2"),
1394 should_panic
: ShouldPanic
::No
,
1396 testfn
: DynTestFn(Box
::new(move|| {}
)),
1398 let filtered
= filter_tests(&opts
, tests
);
1400 assert_eq
!(filtered
.len(), 1);
1401 assert_eq
!(filtered
[0].desc
.name
.to_string(),
1403 assert
!(filtered
[0].desc
.ignore
== false);
1407 pub fn sort_tests() {
1408 let mut opts
= TestOpts
::new();
1409 opts
.run_tests
= true;
1412 vec
!("sha1::test".to_string(),
1413 "isize::test_to_str".to_string(),
1414 "isize::test_pow".to_string(),
1415 "test::do_not_run_ignored_tests".to_string(),
1416 "test::ignored_tests_result_in_ignored".to_string(),
1417 "test::first_free_arg_should_be_a_filter".to_string(),
1418 "test::parse_ignored_flag".to_string(),
1419 "test::filter_for_ignored_option".to_string(),
1420 "test::sort_tests".to_string());
1424 let mut tests
= Vec
::new();
1425 for name
in &names
{
1426 let test
= TestDescAndFn
{
1428 name
: DynTestName((*name
).clone()),
1430 should_panic
: ShouldPanic
::No
,
1432 testfn
: DynTestFn(Box
::new(testfn
)),
1438 let filtered
= filter_tests(&opts
, tests
);
1441 vec
!("isize::test_pow".to_string(),
1442 "isize::test_to_str".to_string(),
1443 "sha1::test".to_string(),
1444 "test::do_not_run_ignored_tests".to_string(),
1445 "test::filter_for_ignored_option".to_string(),
1446 "test::first_free_arg_should_be_a_filter".to_string(),
1447 "test::ignored_tests_result_in_ignored".to_string(),
1448 "test::parse_ignored_flag".to_string(),
1449 "test::sort_tests".to_string());
1451 for (a
, b
) in expected
.iter().zip(filtered
) {
1452 assert
!(*a
== b
.desc
.name
.to_string());
1457 pub fn test_metricmap_compare() {
1458 let mut m1
= MetricMap
::new();
1459 let mut m2
= MetricMap
::new();
1460 m1
.insert_metric("in-both-noise", 1000.0, 200.0);
1461 m2
.insert_metric("in-both-noise", 1100.0, 200.0);
1463 m1
.insert_metric("in-first-noise", 1000.0, 2.0);
1464 m2
.insert_metric("in-second-noise", 1000.0, 2.0);
1466 m1
.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1467 m2
.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1469 m1
.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1470 m2
.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1472 m1
.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1473 m2
.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1475 m1
.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1476 m2
.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);