1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test")]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url
= "http://www.rust-lang.org/favicon.ico",
35 html_root_url
= "http://doc.rust-lang.org/nightly/")]
38 #![feature(box_syntax)]
39 #![feature(collections)]
41 #![feature(rustc_private)]
42 #![feature(staged_api)]
45 #![feature(set_stdio)]
47 #![feature(duration_span)]
50 extern crate serialize
;
51 extern crate serialize
as rustc_serialize
;
55 pub use self::TestFn
::*;
56 pub use self::ColorConfig
::*;
57 pub use self::TestResult
::*;
58 pub use self::TestName
::*;
59 use self::TestEvent
::*;
60 use self::NamePadding
::*;
61 use self::OutputLocation
::*;
64 use getopts
::{OptGroup, optflag, optopt}
;
65 use serialize
::Encodable
;
66 use std
::boxed
::FnBox
;
68 use term
::color
::{Color, RED, YELLOW, GREEN, CYAN}
;
72 use std
::collections
::BTreeMap
;
76 use std
::io
::prelude
::*;
78 use std
::iter
::repeat
;
79 use std
::path
::PathBuf
;
80 use std
::sync
::mpsc
::{channel, Sender}
;
81 use std
::sync
::{Arc, Mutex}
;
83 use std
::thunk
::Thunk
;
84 use std
::time
::Duration
;
86 // to be used by rustc to compile tests in libtest
88 pub use {Bencher
, TestName
, TestResult
, TestDesc
,
89 TestDescAndFn
, TestOpts
, TrFailed
, TrIgnored
, TrOk
,
91 StaticTestFn
, StaticTestName
, DynTestName
, DynTestFn
,
92 run_test
, test_main
, test_main_static
, filter_tests
,
93 parse_opts
, StaticBenchFn
, ShouldPanic
};
98 // The name of a test. By convention this follows the rules for rust
99 // paths; i.e. it should be a series of identifiers separated by double
100 // colons. This way if some test runner wants to arrange the tests
101 // hierarchically it may.
103 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
105 StaticTestName(&'
static str),
109 fn as_slice
<'a
>(&'a
self) -> &'a
str {
111 StaticTestName(s
) => s
,
112 DynTestName(ref s
) => s
116 impl fmt
::Display
for TestName
{
117 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
118 fmt
::Display
::fmt(self.as_slice(), f
)
122 #[derive(Clone, Copy)]
129 fn padded_name(&self, column_count
: usize, align
: NamePadding
) -> String
{
130 let mut name
= String
::from_str(self.name
.as_slice());
131 let fill
= column_count
.saturating_sub(name
.len());
132 let pad
= repeat(" ").take(fill
).collect
::<String
>();
143 /// Represents a benchmark function.
144 pub trait TDynBenchFn
: Send
{
145 fn run(&self, harness
: &mut Bencher
);
148 // A function that runs a test. If the function returns successfully,
149 // the test succeeds; if the function panics then the test fails. We
150 // may need to come up with a more clever definition of test in order
151 // to support isolation of tests into threads.
154 StaticBenchFn(fn(&mut Bencher
)),
155 StaticMetricFn(fn(&mut MetricMap
)),
156 DynTestFn(Thunk
<'
static>),
157 DynMetricFn(Box
<FnBox(&mut MetricMap
)+Send
>),
158 DynBenchFn(Box
<TDynBenchFn
+'
static>)
162 fn padding(&self) -> NamePadding
{
164 &StaticTestFn(..) => PadNone
,
165 &StaticBenchFn(..) => PadOnRight
,
166 &StaticMetricFn(..) => PadOnRight
,
167 &DynTestFn(..) => PadNone
,
168 &DynMetricFn(..) => PadOnRight
,
169 &DynBenchFn(..) => PadOnRight
,
174 impl fmt
::Debug
for TestFn
{
175 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
176 f
.write_str(match *self {
177 StaticTestFn(..) => "StaticTestFn(..)",
178 StaticBenchFn(..) => "StaticBenchFn(..)",
179 StaticMetricFn(..) => "StaticMetricFn(..)",
180 DynTestFn(..) => "DynTestFn(..)",
181 DynMetricFn(..) => "DynMetricFn(..)",
182 DynBenchFn(..) => "DynBenchFn(..)"
187 /// Manager of the benchmarking runs.
189 /// This is fed into functions marked with `#[bench]` to allow for
190 /// set-up & tear-down before running a piece of code repeatedly via a
192 #[derive(Copy, Clone)]
199 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
200 pub enum ShouldPanic
{
202 Yes(Option
<&'
static str>)
205 // The definition of a single test. A test runner will run a list of
207 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
208 pub struct TestDesc
{
211 pub should_panic
: ShouldPanic
,
214 unsafe impl Send
for TestDesc {}
217 pub struct TestDescAndFn
{
222 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
229 pub fn new(value
: f64, noise
: f64) -> Metric
{
230 Metric {value: value, noise: noise}
235 pub struct MetricMap(BTreeMap
<String
,Metric
>);
237 impl Clone
for MetricMap
{
238 fn clone(&self) -> MetricMap
{
239 let MetricMap(ref map
) = *self;
240 MetricMap(map
.clone())
244 // The default console test runner. It accepts the command line
245 // arguments and a vector of test_descs.
246 pub fn test_main(args
: &[String
], tests
: Vec
<TestDescAndFn
> ) {
248 match parse_opts(args
) {
250 Some(Err(msg
)) => panic
!("{:?}", msg
),
253 match run_tests_console(&opts
, tests
) {
255 Ok(false) => panic
!("Some tests failed"),
256 Err(e
) => panic
!("io error when running tests: {:?}", e
),
260 // A variant optimized for invocation with a static test vector.
261 // This will panic (intentionally) when fed any dynamic tests, because
262 // it is copying the static values out into a dynamic vector and cannot
263 // copy dynamic values. It is doing this because from this point on
264 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
265 // semantics into parallel test runners, which in turn requires a Vec<>
266 // rather than a &[].
267 pub fn test_main_static(args
: env
::Args
, tests
: &[TestDescAndFn
]) {
268 let args
= args
.collect
::<Vec
<_
>>();
269 let owned_tests
= tests
.iter().map(|t
| {
271 StaticTestFn(f
) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() }
,
272 StaticBenchFn(f
) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() }
,
273 _
=> panic
!("non-static tests passed to test::test_main_static")
276 test_main(&args
, owned_tests
)
279 #[derive(Copy, Clone)]
280 pub enum ColorConfig
{
286 pub struct TestOpts
{
287 pub filter
: Option
<String
>,
288 pub run_ignored
: bool
,
290 pub bench_benchmarks
: bool
,
291 pub logfile
: Option
<PathBuf
>,
293 pub color
: ColorConfig
,
298 fn new() -> TestOpts
{
303 bench_benchmarks
: false,
311 /// Result of parsing the options.
312 pub type OptRes
= Result
<TestOpts
, String
>;
314 fn optgroups() -> Vec
<getopts
::OptGroup
> {
315 vec
!(getopts
::optflag("", "ignored", "Run ignored tests"),
316 getopts
::optflag("", "test", "Run tests and not benchmarks"),
317 getopts
::optflag("", "bench", "Run benchmarks instead of tests"),
318 getopts
::optflag("h", "help", "Display this message (longer with --help)"),
319 getopts
::optopt("", "logfile", "Write logs to the specified file instead \
321 getopts
::optflag("", "nocapture", "don't capture stdout/stderr of each \
322 task, allow printing directly"),
323 getopts
::optopt("", "color", "Configure coloring of output:
324 auto = colorize if stdout is a tty and tests are run on serially (default);
325 always = always colorize output;
326 never = never colorize output;", "auto|always|never"))
329 fn usage(binary
: &str) {
330 let message
= format
!("Usage: {} [OPTIONS] [FILTER]", binary
);
333 The FILTER regex is tested against the name of all tests to run, and
334 only those tests that match are run.
336 By default, all tests are run in parallel. This can be altered with the
337 RUST_TEST_THREADS environment variable when running tests (set it to 1).
339 All tests have their standard output and standard error captured by default.
340 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
341 environment variable. Logging is not captured by default.
345 #[test] - Indicates a function is a test to be run. This function
347 #[bench] - Indicates a function is a benchmark to be run. This
348 function takes one argument (test::Bencher).
349 #[should_panic] - This function (also labeled with #[test]) will only pass if
350 the code causes a panic (an assertion failure or panic!)
351 A message may be provided, which the failure string must
352 contain: #[should_panic(expected = "foo")].
353 #[ignore] - When applied to a function which is already attributed as a
354 test, then the test runner will ignore these tests during
355 normal test runs. Running with --ignored will run these
357 usage
= getopts
::usage(&message
, &optgroups()));
360 // Parses command line arguments into test options
361 pub fn parse_opts(args
: &[String
]) -> Option
<OptRes
> {
362 let args_
= args
.tail();
364 match getopts
::getopts(args_
, &optgroups()) {
366 Err(f
) => return Some(Err(f
.to_string()))
369 if matches
.opt_present("h") { usage(&args[0]); return None; }
371 let filter
= if !matches
.free
.is_empty() {
372 Some(matches
.free
[0].clone())
377 let run_ignored
= matches
.opt_present("ignored");
379 let logfile
= matches
.opt_str("logfile");
380 let logfile
= logfile
.map(|s
| PathBuf
::from(&s
));
382 let bench_benchmarks
= matches
.opt_present("bench");
383 let run_tests
= ! bench_benchmarks
||
384 matches
.opt_present("test");
386 let mut nocapture
= matches
.opt_present("nocapture");
388 nocapture
= env
::var("RUST_TEST_NOCAPTURE").is_ok();
391 let color
= match matches
.opt_str("color").as_ref().map(|s
| &**s
) {
392 Some("auto") | None
=> AutoColor
,
393 Some("always") => AlwaysColor
,
394 Some("never") => NeverColor
,
396 Some(v
) => return Some(Err(format
!("argument for --color must be \
397 auto, always, or never (was {})",
401 let test_opts
= TestOpts
{
403 run_ignored
: run_ignored
,
404 run_tests
: run_tests
,
405 bench_benchmarks
: bench_benchmarks
,
407 nocapture
: nocapture
,
414 #[derive(Clone, PartialEq)]
415 pub struct BenchSamples
{
416 ns_iter_summ
: stats
::Summary
,
420 #[derive(Clone, PartialEq)]
421 pub enum TestResult
{
425 TrMetrics(MetricMap
),
426 TrBench(BenchSamples
),
429 unsafe impl Send
for TestResult {}
431 enum OutputLocation
<T
> {
432 Pretty(Box
<term
::Terminal
<term
::WriterWrapper
> + Send
>),
436 struct ConsoleTestState
<T
> {
437 log_out
: Option
<File
>,
438 out
: OutputLocation
<T
>,
446 failures
: Vec
<(TestDesc
, Vec
<u8> )> ,
447 max_name_len
: usize, // number of columns to fill when aligning names
450 impl<T
: Write
> ConsoleTestState
<T
> {
451 pub fn new(opts
: &TestOpts
,
452 _
: Option
<T
>) -> io
::Result
<ConsoleTestState
<io
::Stdout
>> {
453 let log_out
= match opts
.logfile
{
454 Some(ref path
) => Some(try
!(File
::create(path
))),
457 let out
= match term
::stdout() {
458 None
=> Raw(io
::stdout()),
462 Ok(ConsoleTestState
{
465 use_color
: use_color(opts
),
471 metrics
: MetricMap
::new(),
472 failures
: Vec
::new(),
477 pub fn write_ok(&mut self) -> io
::Result
<()> {
478 self.write_pretty("ok", term
::color
::GREEN
)
481 pub fn write_failed(&mut self) -> io
::Result
<()> {
482 self.write_pretty("FAILED", term
::color
::RED
)
485 pub fn write_ignored(&mut self) -> io
::Result
<()> {
486 self.write_pretty("ignored", term
::color
::YELLOW
)
489 pub fn write_metric(&mut self) -> io
::Result
<()> {
490 self.write_pretty("metric", term
::color
::CYAN
)
493 pub fn write_bench(&mut self) -> io
::Result
<()> {
494 self.write_pretty("bench", term
::color
::CYAN
)
497 pub fn write_pretty(&mut self,
499 color
: term
::color
::Color
) -> io
::Result
<()> {
501 Pretty(ref mut term
) => {
503 try
!(term
.fg(color
));
505 try
!(term
.write_all(word
.as_bytes()));
511 Raw(ref mut stdout
) => {
512 try
!(stdout
.write_all(word
.as_bytes()));
518 pub fn write_plain(&mut self, s
: &str) -> io
::Result
<()> {
520 Pretty(ref mut term
) => {
521 try
!(term
.write_all(s
.as_bytes()));
524 Raw(ref mut stdout
) => {
525 try
!(stdout
.write_all(s
.as_bytes()));
531 pub fn write_run_start(&mut self, len
: usize) -> io
::Result
<()> {
533 let noun
= if len
!= 1 { "tests" }
else { "test" }
;
534 self.write_plain(&format
!("\nrunning {} {}\n", len
, noun
))
537 pub fn write_test_start(&mut self, test
: &TestDesc
,
538 align
: NamePadding
) -> io
::Result
<()> {
539 let name
= test
.padded_name(self.max_name_len
, align
);
540 self.write_plain(&format
!("test {} ... ", name
))
543 pub fn write_result(&mut self, result
: &TestResult
) -> io
::Result
<()> {
545 TrOk
=> self.write_ok(),
546 TrFailed
=> self.write_failed(),
547 TrIgnored
=> self.write_ignored(),
548 TrMetrics(ref mm
) => {
549 try
!(self.write_metric());
550 self.write_plain(&format
!(": {}", mm
.fmt_metrics()))
553 try
!(self.write_bench());
555 try
!(self.write_plain(&format
!(": {}", fmt_bench_samples(bs
))));
560 self.write_plain("\n")
563 pub fn write_log(&mut self, test
: &TestDesc
,
564 result
: &TestResult
) -> io
::Result
<()> {
568 let s
= format
!("{} {}\n", match *result
{
569 TrOk
=> "ok".to_string(),
570 TrFailed
=> "failed".to_string(),
571 TrIgnored
=> "ignored".to_string(),
572 TrMetrics(ref mm
) => mm
.fmt_metrics(),
573 TrBench(ref bs
) => fmt_bench_samples(bs
)
575 o
.write_all(s
.as_bytes())
580 pub fn write_failures(&mut self) -> io
::Result
<()> {
581 try
!(self.write_plain("\nfailures:\n"));
582 let mut failures
= Vec
::new();
583 let mut fail_out
= String
::new();
584 for &(ref f
, ref stdout
) in &self.failures
{
585 failures
.push(f
.name
.to_string());
586 if !stdout
.is_empty() {
587 fail_out
.push_str(&format
!("---- {} stdout ----\n\t", f
.name
));
588 let output
= String
::from_utf8_lossy(stdout
);
589 fail_out
.push_str(&output
);
590 fail_out
.push_str("\n");
593 if !fail_out
.is_empty() {
594 try
!(self.write_plain("\n"));
595 try
!(self.write_plain(&fail_out
));
598 try
!(self.write_plain("\nfailures:\n"));
600 for name
in &failures
{
601 try
!(self.write_plain(&format
!(" {}\n", name
)));
606 pub fn write_run_finish(&mut self) -> io
::Result
<bool
> {
607 assert
!(self.passed
+ self.failed
+ self.ignored
+ self.measured
== self.total
);
609 let success
= self.failed
== 0;
611 try
!(self.write_failures());
614 try
!(self.write_plain("\ntest result: "));
616 // There's no parallelism at this point so it's safe to use color
617 try
!(self.write_ok());
619 try
!(self.write_failed());
621 let s
= format
!(". {} passed; {} failed; {} ignored; {} measured\n\n",
622 self.passed
, self.failed
, self.ignored
, self.measured
);
623 try
!(self.write_plain(&s
));
628 pub fn fmt_bench_samples(bs
: &BenchSamples
) -> String
{
630 format
!("{:>9} ns/iter (+/- {}) = {} MB/s",
631 bs
.ns_iter_summ
.median
as usize,
632 (bs
.ns_iter_summ
.max
- bs
.ns_iter_summ
.min
) as usize,
635 format
!("{:>9} ns/iter (+/- {})",
636 bs
.ns_iter_summ
.median
as usize,
637 (bs
.ns_iter_summ
.max
- bs
.ns_iter_summ
.min
) as usize)
641 // A simple console test runner
642 pub fn run_tests_console(opts
: &TestOpts
, tests
: Vec
<TestDescAndFn
> ) -> io
::Result
<bool
> {
644 fn callback
<T
: Write
>(event
: &TestEvent
,
645 st
: &mut ConsoleTestState
<T
>) -> io
::Result
<()> {
646 match (*event
).clone() {
647 TeFiltered(ref filtered_tests
) => st
.write_run_start(filtered_tests
.len()),
648 TeWait(ref test
, padding
) => st
.write_test_start(test
, padding
),
649 TeResult(test
, result
, stdout
) => {
650 try
!(st
.write_log(&test
, &result
));
651 try
!(st
.write_result(&result
));
653 TrOk
=> st
.passed
+= 1,
654 TrIgnored
=> st
.ignored
+= 1,
656 let tname
= test
.name
;
657 let MetricMap(mm
) = mm
;
660 .insert_metric(&format
!("{}.{}",
669 st
.metrics
.insert_metric(test
.name
.as_slice(),
670 bs
.ns_iter_summ
.median
,
671 bs
.ns_iter_summ
.max
- bs
.ns_iter_summ
.min
);
676 st
.failures
.push((test
, stdout
));
684 let mut st
= try
!(ConsoleTestState
::new(opts
, None
::<io
::Stdout
>));
685 fn len_if_padded(t
: &TestDescAndFn
) -> usize {
686 match t
.testfn
.padding() {
688 PadOnRight
=> t
.desc
.name
.as_slice().len(),
691 match tests
.iter().max_by(|t
|len_if_padded(*t
)) {
693 let n
= t
.desc
.name
.as_slice();
694 st
.max_name_len
= n
.len();
698 try
!(run_tests(opts
, tests
, |x
| callback(&x
, &mut st
)));
699 return st
.write_run_finish();
703 fn should_sort_failures_before_printing_them() {
704 let test_a
= TestDesc
{
705 name
: StaticTestName("a"),
707 should_panic
: ShouldPanic
::No
710 let test_b
= TestDesc
{
711 name
: StaticTestName("b"),
713 should_panic
: ShouldPanic
::No
716 let mut st
= ConsoleTestState
{
718 out
: Raw(Vec
::new()),
726 metrics
: MetricMap
::new(),
727 failures
: vec
!((test_b
, Vec
::new()), (test_a
, Vec
::new()))
730 st
.write_failures().unwrap();
731 let s
= match st
.out
{
732 Raw(ref m
) => String
::from_utf8_lossy(&m
[..]),
733 Pretty(_
) => unreachable
!()
736 let apos
= s
.find("a").unwrap();
737 let bpos
= s
.find("b").unwrap();
738 assert
!(apos
< bpos
);
741 fn use_color(opts
: &TestOpts
) -> bool
{
743 AutoColor
=> get_concurrency() == 1 && stdout_isatty(),
750 fn stdout_isatty() -> bool
{
751 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
754 fn stdout_isatty() -> bool
{
755 const STD_OUTPUT_HANDLE
: libc
::DWORD
= -11i32 as libc
::DWORD
;
757 fn GetStdHandle(which
: libc
::DWORD
) -> libc
::HANDLE
;
758 fn GetConsoleMode(hConsoleHandle
: libc
::HANDLE
,
759 lpMode
: libc
::LPDWORD
) -> libc
::BOOL
;
762 let handle
= GetStdHandle(STD_OUTPUT_HANDLE
);
764 GetConsoleMode(handle
, &mut out
) != 0
770 TeFiltered(Vec
<TestDesc
> ),
771 TeWait(TestDesc
, NamePadding
),
772 TeResult(TestDesc
, TestResult
, Vec
<u8> ),
775 pub type MonitorMsg
= (TestDesc
, TestResult
, Vec
<u8> );
778 fn run_tests
<F
>(opts
: &TestOpts
,
779 tests
: Vec
<TestDescAndFn
> ,
780 mut callback
: F
) -> io
::Result
<()> where
781 F
: FnMut(TestEvent
) -> io
::Result
<()>,
783 let mut filtered_tests
= filter_tests(opts
, tests
);
784 if !opts
.bench_benchmarks
{
785 filtered_tests
= convert_benchmarks_to_tests(filtered_tests
);
788 let filtered_descs
= filtered_tests
.iter()
789 .map(|t
| t
.desc
.clone())
792 try
!(callback(TeFiltered(filtered_descs
)));
794 let (filtered_tests
, filtered_benchs_and_metrics
): (Vec
<_
>, _
) =
795 filtered_tests
.into_iter().partition(|e
| {
797 StaticTestFn(_
) | DynTestFn(_
) => true,
802 // It's tempting to just spawn all the tests at once, but since we have
803 // many tests that run in other processes we would be making a big mess.
804 let concurrency
= get_concurrency();
806 let mut remaining
= filtered_tests
;
810 let (tx
, rx
) = channel
::<MonitorMsg
>();
812 while pending
> 0 || !remaining
.is_empty() {
813 while pending
< concurrency
&& !remaining
.is_empty() {
814 let test
= remaining
.pop().unwrap();
815 if concurrency
== 1 {
816 // We are doing one test at a time so we can print the name
817 // of the test before we run it. Useful for debugging tests
818 // that hang forever.
819 try
!(callback(TeWait(test
.desc
.clone(), test
.testfn
.padding())));
821 run_test(opts
, !opts
.run_tests
, test
, tx
.clone());
825 let (desc
, result
, stdout
) = rx
.recv().unwrap();
826 if concurrency
!= 1 {
827 try
!(callback(TeWait(desc
.clone(), PadNone
)));
829 try
!(callback(TeResult(desc
, result
, stdout
)));
833 if opts
.bench_benchmarks
{
834 // All benchmarks run at the end, in serial.
835 // (this includes metric fns)
836 for b
in filtered_benchs_and_metrics
{
837 try
!(callback(TeWait(b
.desc
.clone(), b
.testfn
.padding())));
838 run_test(opts
, false, b
, tx
.clone());
839 let (test
, result
, stdout
) = rx
.recv().unwrap();
840 try
!(callback(TeResult(test
, result
, stdout
)));
847 fn get_concurrency() -> usize {
848 match env
::var("RUST_TEST_THREADS") {
850 let opt_n
: Option
<usize> = s
.parse().ok();
852 Some(n
) if n
> 0 => n
,
853 _
=> panic
!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s
)
857 if std
::rt
::util
::limit_thread_creation_due_to_osx_and_valgrind() {
860 extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
861 unsafe { rust_get_num_cpus() as usize }
867 pub fn filter_tests(opts
: &TestOpts
, tests
: Vec
<TestDescAndFn
>) -> Vec
<TestDescAndFn
> {
868 let mut filtered
= tests
;
870 // Remove tests that don't match the test filter
871 filtered
= match opts
.filter
{
873 Some(ref filter
) => {
874 filtered
.into_iter().filter(|test
| {
875 test
.desc
.name
.as_slice().contains(&filter
[..])
880 // Maybe pull out the ignored test and unignore them
881 filtered
= if !opts
.run_ignored
{
884 fn filter(test
: TestDescAndFn
) -> Option
<TestDescAndFn
> {
885 if test
.desc
.ignore
{
886 let TestDescAndFn {desc, testfn}
= test
;
888 desc
: TestDesc {ignore: false, ..desc}
,
895 filtered
.into_iter().filter_map(|x
| filter(x
)).collect()
898 // Sort the tests alphabetically
899 filtered
.sort_by(|t1
, t2
| t1
.desc
.name
.as_slice().cmp(t2
.desc
.name
.as_slice()));
904 pub fn convert_benchmarks_to_tests(tests
: Vec
<TestDescAndFn
>) -> Vec
<TestDescAndFn
> {
905 // convert benchmarks to tests, if we're not benchmarking them
906 tests
.into_iter().map(|x
| {
907 let testfn
= match x
.testfn
{
908 DynBenchFn(bench
) => {
909 DynTestFn(Box
::new(move || bench
::run_once(|b
| bench
.run(b
))))
911 StaticBenchFn(benchfn
) => {
912 DynTestFn(Box
::new(move || bench
::run_once(|b
| benchfn(b
))))
916 TestDescAndFn { desc: x.desc, testfn: testfn }
920 pub fn run_test(opts
: &TestOpts
,
923 monitor_ch
: Sender
<MonitorMsg
>) {
925 let TestDescAndFn {desc, testfn}
= test
;
927 if force_ignore
|| desc
.ignore
{
928 monitor_ch
.send((desc
, TrIgnored
, Vec
::new())).unwrap();
932 fn run_test_inner(desc
: TestDesc
,
933 monitor_ch
: Sender
<MonitorMsg
>,
935 testfn
: Thunk
<'
static>) {
936 struct Sink(Arc
<Mutex
<Vec
<u8>>>);
937 impl Write
for Sink
{
938 fn write(&mut self, data
: &[u8]) -> io
::Result
<usize> {
939 Write
::write(&mut *self.0.lock().unwrap(), data
)
941 fn flush(&mut self) -> io
::Result
<()> { Ok(()) }
944 thread
::spawn(move || {
945 let data
= Arc
::new(Mutex
::new(Vec
::new()));
946 let data2
= data
.clone();
947 let cfg
= thread
::Builder
::new().name(match desc
.name
{
948 DynTestName(ref name
) => name
.clone().to_string(),
949 StaticTestName(name
) => name
.to_string(),
952 let result_guard
= cfg
.spawn(move || {
954 io
::set_print(box Sink(data2
.clone()));
955 io
::set_panic(box Sink(data2
));
959 let test_result
= calc_result(&desc
, result_guard
.join());
960 let stdout
= data
.lock().unwrap().to_vec();
961 monitor_ch
.send((desc
.clone(), test_result
, stdout
)).unwrap();
966 DynBenchFn(bencher
) => {
967 let bs
= ::bench
::benchmark(|harness
| bencher
.run(harness
));
968 monitor_ch
.send((desc
, TrBench(bs
), Vec
::new())).unwrap();
971 StaticBenchFn(benchfn
) => {
972 let bs
= ::bench
::benchmark(|harness
| (benchfn
.clone())(harness
));
973 monitor_ch
.send((desc
, TrBench(bs
), Vec
::new())).unwrap();
977 let mut mm
= MetricMap
::new();
978 f
.call_box((&mut mm
,));
979 monitor_ch
.send((desc
, TrMetrics(mm
), Vec
::new())).unwrap();
982 StaticMetricFn(f
) => {
983 let mut mm
= MetricMap
::new();
985 monitor_ch
.send((desc
, TrMetrics(mm
), Vec
::new())).unwrap();
988 DynTestFn(f
) => run_test_inner(desc
, monitor_ch
, opts
.nocapture
, f
),
989 StaticTestFn(f
) => run_test_inner(desc
, monitor_ch
, opts
.nocapture
,
990 Box
::new(move|| f()))
994 fn calc_result(desc
: &TestDesc
, task_result
: Result
<(), Box
<Any
+Send
>>) -> TestResult
{
995 match (&desc
.should_panic
, task_result
) {
996 (&ShouldPanic
::No
, Ok(())) |
997 (&ShouldPanic
::Yes(None
), Err(_
)) => TrOk
,
998 (&ShouldPanic
::Yes(Some(msg
)), Err(ref err
))
999 if err
.downcast_ref
::<String
>()
1001 .or_else(|| err
.downcast_ref
::<&'
static str>().map(|e
| *e
))
1002 .map(|e
| e
.contains(msg
))
1003 .unwrap_or(false) => TrOk
,
1010 pub fn new() -> MetricMap
{
1011 MetricMap(BTreeMap
::new())
1014 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1015 /// must be non-negative. The `noise` indicates the uncertainty of the
1016 /// metric, which doubles as the "noise range" of acceptable
1017 /// pairwise-regressions on this named value, when comparing from one
1018 /// metric to the next using `compare_to_old`.
1020 /// If `noise` is positive, then it means this metric is of a value
1021 /// you want to see grow smaller, so a change larger than `noise` in the
1022 /// positive direction represents a regression.
1024 /// If `noise` is negative, then it means this metric is of a value
1025 /// you want to see grow larger, so a change larger than `noise` in the
1026 /// negative direction represents a regression.
1027 pub fn insert_metric(&mut self, name
: &str, value
: f64, noise
: f64) {
1032 let MetricMap(ref mut map
) = *self;
1033 map
.insert(name
.to_string(), m
);
1036 pub fn fmt_metrics(&self) -> String
{
1037 let MetricMap(ref mm
) = *self;
1038 let v
: Vec
<String
> = mm
.iter()
1039 .map(|(k
,v
)| format
!("{}: {} (+/- {})", *k
,
1049 /// A function that is opaque to the optimizer, to allow benchmarks to
1050 /// pretend to use outputs to assist in avoiding dead-code
1053 /// This function is a no-op, and does not even read from `dummy`.
1054 pub fn black_box
<T
>(dummy
: T
) -> T
{
1055 // we need to "use" the argument in some way LLVM can't
1057 unsafe {asm!("" : : "r"(&dummy))}
1063 /// Callback for benchmark functions to run in their body.
1064 pub fn iter
<T
, F
>(&mut self, mut inner
: F
) where F
: FnMut() -> T
{
1065 self.dur
= Duration
::span(|| {
1066 let k
= self.iterations
;
1073 pub fn ns_elapsed(&mut self) -> u64 {
1074 self.dur
.secs() * 1_000_000_000 + (self.dur
.extra_nanos() as u64)
1077 pub fn ns_per_iter(&mut self) -> u64 {
1078 if self.iterations
== 0 {
1081 self.ns_elapsed() / cmp
::max(self.iterations
, 1)
1085 pub fn bench_n
<F
>(&mut self, n
: u64, f
: F
) where F
: FnOnce(&mut Bencher
) {
1086 self.iterations
= n
;
1090 // This is a more statistics-driven benchmark algorithm
1091 pub fn auto_bench
<F
>(&mut self, mut f
: F
) -> stats
::Summary
where F
: FnMut(&mut Bencher
) {
1092 // Initial bench run to get ballpark figure.
1094 self.bench_n(n
, |x
| f(x
));
1096 // Try to estimate iter count for 1ms falling back to 1m
1097 // iterations if first run took < 1ns.
1098 if self.ns_per_iter() == 0 {
1101 n
= 1_000_000 / cmp
::max(self.ns_per_iter(), 1);
1103 // if the first run took more than 1ms we don't want to just
1104 // be left doing 0 iterations on every loop. The unfortunate
1105 // side effect of not being able to do as many runs is
1106 // automatically handled by the statistical analysis below
1107 // (i.e. larger error bars).
1108 if n
== 0 { n = 1; }
1110 let mut total_run
= Duration
::new(0, 0);
1111 let samples
: &mut [f64] = &mut [0.0_f64; 50];
1113 let mut summ
= None
;
1114 let mut summ5
= None
;
1116 let loop_run
= Duration
::span(|| {
1118 for p
in &mut *samples
{
1119 self.bench_n(n
, |x
| f(x
));
1120 *p
= self.ns_per_iter() as f64;
1123 stats
::winsorize(samples
, 5.0);
1124 summ
= Some(stats
::Summary
::new(samples
));
1126 for p
in &mut *samples
{
1127 self.bench_n(5 * n
, |x
| f(x
));
1128 *p
= self.ns_per_iter() as f64;
1131 stats
::winsorize(samples
, 5.0);
1132 summ5
= Some(stats
::Summary
::new(samples
));
1134 let summ
= summ
.unwrap();
1135 let summ5
= summ5
.unwrap();
1137 // If we've run for 100ms and seem to have converged to a
1139 if loop_run
> Duration
::from_millis(100) &&
1140 summ
.median_abs_dev_pct
< 1.0 &&
1141 summ
.median
- summ5
.median
< summ5
.median_abs_dev
{
1145 total_run
= total_run
+ loop_run
;
1146 // Longest we ever run for is 3s.
1147 if total_run
> Duration
::from_secs(3) {
1151 // If we overflow here just return the results so far. We check a
1152 // multiplier of 10 because we're about to multiply by 2 and the
1153 // next iteration of the loop will also multiply by 5 (to calculate
1154 // the summ5 result)
1155 n
= match n
.checked_mul(10) {
1157 None
=> return summ5
,
1165 use std
::time
::Duration
;
1166 use super::{Bencher, BenchSamples}
;
1168 pub fn benchmark
<F
>(f
: F
) -> BenchSamples
where F
: FnMut(&mut Bencher
) {
1169 let mut bs
= Bencher
{
1171 dur
: Duration
::new(0, 0),
1175 let ns_iter_summ
= bs
.auto_bench(f
);
1177 let ns_iter
= cmp
::max(ns_iter_summ
.median
as u64, 1);
1178 let iter_s
= 1_000_000_000 / ns_iter
;
1179 let mb_s
= (bs
.bytes
* iter_s
) / 1_000_000;
1182 ns_iter_summ
: ns_iter_summ
,
1187 pub fn run_once
<F
>(f
: F
) where F
: FnOnce(&mut Bencher
) {
1188 let mut bs
= Bencher
{
1190 dur
: Duration
::new(0, 0),
1199 use test
::{TrFailed
, TrIgnored
, TrOk
, filter_tests
, parse_opts
,
1200 TestDesc
, TestDescAndFn
, TestOpts
, run_test
,
1202 StaticTestName
, DynTestName
, DynTestFn
, ShouldPanic
};
1203 use std
::thunk
::Thunk
;
1204 use std
::sync
::mpsc
::channel
;
1207 pub fn do_not_run_ignored_tests() {
1208 fn f() { panic!(); }
1209 let desc
= TestDescAndFn
{
1211 name
: StaticTestName("whatever"),
1213 should_panic
: ShouldPanic
::No
,
1215 testfn
: DynTestFn(Box
::new(move|| f())),
1217 let (tx
, rx
) = channel();
1218 run_test(&TestOpts
::new(), false, desc
, tx
);
1219 let (_
, res
, _
) = rx
.recv().unwrap();
1220 assert
!(res
!= TrOk
);
1224 pub fn ignored_tests_result_in_ignored() {
1226 let desc
= TestDescAndFn
{
1228 name
: StaticTestName("whatever"),
1230 should_panic
: ShouldPanic
::No
,
1232 testfn
: DynTestFn(Box
::new(move|| f())),
1234 let (tx
, rx
) = channel();
1235 run_test(&TestOpts
::new(), false, desc
, tx
);
1236 let (_
, res
, _
) = rx
.recv().unwrap();
1237 assert
!(res
== TrIgnored
);
1241 fn test_should_panic() {
1242 fn f() { panic!(); }
1243 let desc
= TestDescAndFn
{
1245 name
: StaticTestName("whatever"),
1247 should_panic
: ShouldPanic
::Yes(None
)
1249 testfn
: DynTestFn(Box
::new(move|| f())),
1251 let (tx
, rx
) = channel();
1252 run_test(&TestOpts
::new(), false, desc
, tx
);
1253 let (_
, res
, _
) = rx
.recv().unwrap();
1254 assert
!(res
== TrOk
);
1258 fn test_should_panic_good_message() {
1259 fn f() { panic!("an error message"); }
1260 let desc
= TestDescAndFn
{
1262 name
: StaticTestName("whatever"),
1264 should_panic
: ShouldPanic
::Yes(Some("error message"))
1266 testfn
: DynTestFn(Box
::new(move|| f())),
1268 let (tx
, rx
) = channel();
1269 run_test(&TestOpts
::new(), false, desc
, tx
);
1270 let (_
, res
, _
) = rx
.recv().unwrap();
1271 assert
!(res
== TrOk
);
1275 fn test_should_panic_bad_message() {
1276 fn f() { panic!("an error message"); }
1277 let desc
= TestDescAndFn
{
1279 name
: StaticTestName("whatever"),
1281 should_panic
: ShouldPanic
::Yes(Some("foobar"))
1283 testfn
: DynTestFn(Box
::new(move|| f())),
1285 let (tx
, rx
) = channel();
1286 run_test(&TestOpts
::new(), false, desc
, tx
);
1287 let (_
, res
, _
) = rx
.recv().unwrap();
1288 assert
!(res
== TrFailed
);
1292 fn test_should_panic_but_succeeds() {
1294 let desc
= TestDescAndFn
{
1296 name
: StaticTestName("whatever"),
1298 should_panic
: ShouldPanic
::Yes(None
)
1300 testfn
: DynTestFn(Box
::new(move|| f())),
1302 let (tx
, rx
) = channel();
1303 run_test(&TestOpts
::new(), false, desc
, tx
);
1304 let (_
, res
, _
) = rx
.recv().unwrap();
1305 assert
!(res
== TrFailed
);
1309 fn parse_ignored_flag() {
1310 let args
= vec
!("progname".to_string(),
1311 "filter".to_string(),
1312 "--ignored".to_string());
1313 let opts
= match parse_opts(&args
) {
1315 _
=> panic
!("Malformed arg in parse_ignored_flag")
1317 assert
!((opts
.run_ignored
));
1321 pub fn filter_for_ignored_option() {
1322 // When we run ignored tests the test filter should filter out all the
1323 // unignored tests and flip the ignore flag on the rest to false
1325 let mut opts
= TestOpts
::new();
1326 opts
.run_tests
= true;
1327 opts
.run_ignored
= true;
1332 name
: StaticTestName("1"),
1334 should_panic
: ShouldPanic
::No
,
1336 testfn
: DynTestFn(Box
::new(move|| {}
)),
1340 name
: StaticTestName("2"),
1342 should_panic
: ShouldPanic
::No
,
1344 testfn
: DynTestFn(Box
::new(move|| {}
)),
1346 let filtered
= filter_tests(&opts
, tests
);
1348 assert_eq
!(filtered
.len(), 1);
1349 assert_eq
!(filtered
[0].desc
.name
.to_string(),
1351 assert
!(filtered
[0].desc
.ignore
== false);
1355 pub fn sort_tests() {
1356 let mut opts
= TestOpts
::new();
1357 opts
.run_tests
= true;
1360 vec
!("sha1::test".to_string(),
1361 "isize::test_to_str".to_string(),
1362 "isize::test_pow".to_string(),
1363 "test::do_not_run_ignored_tests".to_string(),
1364 "test::ignored_tests_result_in_ignored".to_string(),
1365 "test::first_free_arg_should_be_a_filter".to_string(),
1366 "test::parse_ignored_flag".to_string(),
1367 "test::filter_for_ignored_option".to_string(),
1368 "test::sort_tests".to_string());
1372 let mut tests
= Vec
::new();
1373 for name
in &names
{
1374 let test
= TestDescAndFn
{
1376 name
: DynTestName((*name
).clone()),
1378 should_panic
: ShouldPanic
::No
,
1380 testfn
: DynTestFn(Box
::new(testfn
)),
1386 let filtered
= filter_tests(&opts
, tests
);
1389 vec
!("isize::test_pow".to_string(),
1390 "isize::test_to_str".to_string(),
1391 "sha1::test".to_string(),
1392 "test::do_not_run_ignored_tests".to_string(),
1393 "test::filter_for_ignored_option".to_string(),
1394 "test::first_free_arg_should_be_a_filter".to_string(),
1395 "test::ignored_tests_result_in_ignored".to_string(),
1396 "test::parse_ignored_flag".to_string(),
1397 "test::sort_tests".to_string());
1399 for (a
, b
) in expected
.iter().zip(filtered
.iter()) {
1400 assert
!(*a
== b
.desc
.name
.to_string());
1405 pub fn test_metricmap_compare() {
1406 let mut m1
= MetricMap
::new();
1407 let mut m2
= MetricMap
::new();
1408 m1
.insert_metric("in-both-noise", 1000.0, 200.0);
1409 m2
.insert_metric("in-both-noise", 1100.0, 200.0);
1411 m1
.insert_metric("in-first-noise", 1000.0, 2.0);
1412 m2
.insert_metric("in-second-noise", 1000.0, 2.0);
1414 m1
.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1415 m2
.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1417 m1
.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1418 m2
.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1420 m1
.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1421 m2
.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1423 m1
.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1424 m2
.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);