// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/testing.html) of the book for more details.
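//!
//! # Example
//!
//! A minimal benchmark, as an illustrative sketch; `#[bench]` requires a
//! nightly compiler and the unstable `test` feature:
//!
//! ```rust,ignore
//! #![feature(test)]
//! extern crate test;
//! use test::{black_box, Bencher};
//!
//! #[bench]
//! fn bench_sum(b: &mut Bencher) {
//!     // `iter` times the closure; `black_box` keeps the optimizer from
//!     // deleting the computation under test.
//!     b.iter(|| (0..1000u64).fold(0, |acc, x| acc + black_box(x)));
//! }
//! ```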

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

#![crate_name = "test"]
#![unstable(feature = "test", issue = "27812")]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
       html_root_url = "https://doc.rust-lang.org/nightly/",
       test(attr(deny(warnings))))]
#![cfg_attr(not(stage0), deny(warnings))]

#![feature(asm)]
#![feature(libc)]
#![feature(rustc_private)]
#![feature(set_stdio)]
#![feature(staged_api)]
#![cfg_attr(stage0, feature(question_mark))]
#![feature(panic_unwind)]

extern crate getopts;
extern crate term;
extern crate libc;
extern crate panic_unwind;

pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;

use std::panic::{catch_unwind, AssertUnwindSafe};
use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::env;
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::io;
use std::iter::repeat;
use std::path::PathBuf;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Instant, Duration};

const TEST_WARN_TIMEOUT_S: u64 = 60;

// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
             TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
             DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
             StaticBenchFn, ShouldPanic};
}

pub mod stats;

// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
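//
// For example, a runner might see `StaticTestName("vec::tests::test_push")`
// (an illustrative name, not one defined in this crate) and split it on
// `::` to group the test under a `vec::tests` suite.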

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String),
}
impl TestName {
    fn as_slice(&self) -> &str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s,
        }
    }
}
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}

#[derive(Clone, Copy, PartialEq, Eq)]
enum NamePadding {
    PadNone,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
        let mut name = String::from(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnRight => {
                name.push_str(&pad);
                name
            }
        }
    }
}

/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
    fn run(&self, harness: &mut Bencher);
}

pub trait FnBox<T>: Send + 'static {
    fn call_box(self: Box<Self>, t: T);
}

impl<T, F: FnOnce(T) + Send + 'static> FnBox<T> for F {
    fn call_box(self: Box<F>, t: T) {
        (*self)(t)
    }
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Box<FnBox<()>>),
    DynMetricFn(Box<for<'a> FnBox<&'a mut MetricMap>>),
    DynBenchFn(Box<TDynBenchFn + 'static>),
}

impl TestFn {
    fn padding(&self) -> NamePadding {
        match *self {
            StaticTestFn(..) => PadNone,
            StaticBenchFn(..) => PadOnRight,
            StaticMetricFn(..) => PadOnRight,
            DynTestFn(..) => PadNone,
            DynMetricFn(..) => PadOnRight,
            DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)",
        })
    }
}

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
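///
/// A sketch of typical usage (assumes the nightly `#[bench]` attribute):
/// set-up stays outside `iter`, and only the closure body is timed.
///
/// ```rust,ignore
/// #[bench]
/// fn bench_sort(b: &mut Bencher) {
///     let data = vec![5u32, 3, 1, 4, 2]; // set-up: not timed
///     b.iter(|| {
///         let mut d = data.clone(); // timed: clone + sort on each iteration
///         d.sort();
///     });
/// }
/// ```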
#[derive(Copy, Clone)]
pub struct Bencher {
    iterations: u64,
    dur: Duration,
    pub bytes: u64,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
    No,
    Yes,
    YesWithMessage(&'static str),
}

// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_panic: ShouldPanic,
}

#[derive(Clone)]
pub struct TestPaths {
    pub file: PathBuf,         // e.g., compile-test/foo/bar/baz.rs
    pub base: PathBuf,         // e.g., compile-test, auxiliary
    pub relative_dir: PathBuf, // e.g., foo/bar
}

#[derive(Debug)]
pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[derive(Clone, PartialEq, Debug, Copy)]
pub struct Metric {
    value: f64,
    noise: f64,
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {
            value: value,
            noise: noise,
        }
    }
}

#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    let opts = match parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => panic!("{:?}", msg),
        None => return,
    };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => std::process::exit(101),
        Err(e) => panic!("io error when running tests: {:?}", e),
    }
}
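
// For example, a hand-rolled harness binary could call this directly (a
// sketch; `my_test` and its name are illustrative, not part of this crate):
//
//     fn my_test() { assert_eq!(2 + 2, 4); }
//
//     fn main() {
//         let args: Vec<String> = std::env::args().collect();
//         let tests = vec![TestDescAndFn {
//             desc: TestDesc {
//                 name: StaticTestName("my_test"),
//                 ignore: false,
//                 should_panic: ShouldPanic::No,
//             },
//             testfn: StaticTestFn(my_test),
//         }];
//         test_main(&args, tests);
//     }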

// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(tests: &[TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests = tests.iter()
        .map(|t| {
            match t.testfn {
                StaticTestFn(f) => {
                    TestDescAndFn {
                        testfn: StaticTestFn(f),
                        desc: t.desc.clone(),
                    }
                }
                StaticBenchFn(f) => {
                    TestDescAndFn {
                        testfn: StaticBenchFn(f),
                        desc: t.desc.clone(),
                    }
                }
                _ => panic!("non-static tests passed to test::test_main_static"),
            }
        })
        .collect();
    test_main(&args, owned_tests)
}

#[derive(Copy, Clone)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

pub struct TestOpts {
    pub filter: Option<String>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub bench_benchmarks: bool,
    pub logfile: Option<PathBuf>,
    pub nocapture: bool,
    pub color: ColorConfig,
    pub quiet: bool,
    pub test_threads: Option<usize>,
    pub skip: Vec<String>,
}

impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            bench_benchmarks: false,
            logfile: None,
            nocapture: false,
            color: AutoColor,
            quiet: false,
            test_threads: None,
            skip: vec![],
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

#[cfg_attr(rustfmt, rustfmt_skip)]
fn optgroups() -> Vec<getopts::OptGroup> {
    vec![getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                                         of stdout", "PATH"),
         getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                            task, allow printing directly"),
         getopts::optopt("", "test-threads", "Number of threads used for running tests \
                                              in parallel", "n_threads"),
         getopts::optmulti("", "skip", "Skip tests whose names contain FILTER (this flag can \
                                        be used multiple times)", "FILTER"),
         getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
         getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never")]
}

fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER string is tested against the names of all tests, and only those
tests whose names contain the filter are run.

By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or by setting the
RUST_TEST_NOCAPTURE environment variable to a value other than "0". Logging is
not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_panic] - This function (also labeled with #[test]) will only pass if
                     the code causes a panic (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_panic(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests."#,
             usage = getopts::usage(&message, &optgroups()));
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = &args[1..];
    let matches = match getopts::getopts(args_, &optgroups()) {
        Ok(m) => m,
        Err(f) => return Some(Err(f.to_string())),
    };

    if matches.opt_present("h") {
        usage(&args[0]);
        return None;
    }

    let filter = if !matches.free.is_empty() {
        Some(matches.free[0].clone())
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");
    let quiet = matches.opt_present("quiet");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| PathBuf::from(&s));

    let bench_benchmarks = matches.opt_present("bench");
    let run_tests = !bench_benchmarks || matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
            Ok(val) => &val != "0",
            Err(_) => false,
        };
    }

    let test_threads = match matches.opt_str("test-threads") {
        Some(n_str) => {
            match n_str.parse::<usize>() {
                Ok(n) => Some(n),
                Err(e) => {
                    return Some(Err(format!("argument for --test-threads must be a number > 0 \
                                             (error: {})",
                                            e)))
                }
            }
        }
        None => None,
    };

    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => {
            return Some(Err(format!("argument for --color must be auto, always, or never (was \
                                     {})",
                                    v)))
        }
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        bench_benchmarks: bench_benchmarks,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
        quiet: quiet,
        test_threads: test_threads,
        skip: matches.opt_strs("skip"),
    };

    Some(Ok(test_opts))
}
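
// For example (values illustrative): parsing
// `["progname", "some_filter", "--test-threads", "4", "--quiet"]` yields
// `Some(Ok(opts))` with `filter == Some("some_filter")`,
// `test_threads == Some(4)` and `quiet == true`, while `--help` prints the
// usage text and returns `None`.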

#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary,
    mb_s: usize,
}

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}

enum OutputLocation<T> {
    Pretty(Box<term::StdoutTerminal>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    quiet: bool,
    total: usize,
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: usize, // number of columns to fill when aligning names
}

impl<T: Write> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(File::create(path)?),
            None => None,
        };
        let out = match term::stdout() {
            None => Raw(io::stdout()),
            Some(t) => Pretty(t),
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            quiet: opts.quiet,
            total: 0,
            passed: 0,
            failed: 0,
            ignored: 0,
            measured: 0,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0,
        })
    }

    pub fn write_ok(&mut self) -> io::Result<()> {
        self.write_short_result("ok", ".", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::Result<()> {
        self.write_short_result("FAILED", "F", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::Result<()> {
        self.write_short_result("ignored", "i", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::Result<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::Result<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
                              -> io::Result<()> {
        if self.quiet {
            self.write_pretty(quiet, color)
        } else {
            self.write_pretty(verbose, color)?;
            self.write_plain("\n")
        }
    }

    pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    term.fg(color)?;
                }
                term.write_all(word.as_bytes())?;
                if self.use_color {
                    term.reset()?;
                }
                term.flush()
            }
            Raw(ref mut stdout) => {
                stdout.write_all(word.as_bytes())?;
                stdout.flush()
            }
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
        match self.out {
            Pretty(ref mut term) => {
                term.write_all(s.as_bytes())?;
                term.flush()
            }
            Raw(ref mut stdout) => {
                stdout.write_all(s.as_bytes())?;
                stdout.flush()
            }
        }
    }

    pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
        self.total = len;
        let noun = if len != 1 {
            "tests"
        } else {
            "test"
        };
        self.write_plain(&format!("\nrunning {} {}\n", len, noun))
    }

    pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
        if self.quiet && align != PadOnRight {
            Ok(())
        } else {
            let name = test.padded_name(self.max_name_len, align);
            self.write_plain(&format!("test {} ... ", name))
        }
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
        match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                self.write_metric()?;
                self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
            }
            TrBench(ref bs) => {
                self.write_bench()?;
                self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
            }
        }
    }

    pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
        self.write_plain(&format!("test {} has been running for over {} seconds\n",
                                  desc.name,
                                  TEST_WARN_TIMEOUT_S))
    }

    pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n",
                                match *result {
                                    TrOk => "ok".to_owned(),
                                    TrFailed => "failed".to_owned(),
                                    TrIgnored => "ignored".to_owned(),
                                    TrMetrics(ref mm) => mm.fmt_metrics(),
                                    TrBench(ref bs) => fmt_bench_samples(bs),
                                },
                                test.name);
                o.write_all(s.as_bytes())
            }
        }
    }

    pub fn write_failures(&mut self) -> io::Result<()> {
        self.write_plain("\nfailures:\n")?;
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in &self.failures {
            failures.push(f.name.to_string());
            if !stdout.is_empty() {
                fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
                let output = String::from_utf8_lossy(stdout);
                fail_out.push_str(&output);
                fail_out.push_str("\n");
            }
        }
        if !fail_out.is_empty() {
            self.write_plain("\n")?;
            self.write_plain(&fail_out)?;
        }

        self.write_plain("\nfailures:\n")?;
        failures.sort();
        for name in &failures {
            self.write_plain(&format!("    {}\n", name))?;
        }
        Ok(())
    }

    pub fn write_run_finish(&mut self) -> io::Result<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let success = self.failed == 0;
        if !success {
            self.write_failures()?;
        }

        self.write_plain("\ntest result: ")?;
        if success {
            // There's no parallelism at this point so it's safe to use color
            self.write_pretty("ok", term::color::GREEN)?;
        } else {
            self.write_pretty("FAILED", term::color::RED)?;
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed,
                        self.failed,
                        self.ignored,
                        self.measured);
        self.write_plain(&s)?;
        return Ok(success);
    }
}

// Format a number with thousands separators
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}
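
// For example, `fmt_thousands_sep(1_234_567, ',')` visits the powers
// 10^9 (skipped: leading zero group), 10^6 ("1"), 10^3 ("234", zero-padded)
// and 10^0 ("567"), producing "1,234,567"; `fmt_thousands_sep(42, ',')` is
// just "42".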

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    use std::fmt::Write;
    let mut output = String::new();

    let median = bs.ns_iter_summ.median as usize;
    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;

    output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
                                  fmt_thousands_sep(median, ','),
                                  fmt_thousands_sep(deviation, ',')))
          .unwrap();
    if bs.mb_s != 0 {
        output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
    }
    output
}
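
// A line produced from these two helpers looks like
// `     12,345 ns/iter (+/- 678) = 810 MB/s` (numbers illustrative): the
// deviation is the max-min spread of the samples, and the MB/s part only
// appears when the benchmark set `bytes` on its `Bencher`.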

// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {

    fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeTimeout(ref test) => st.write_timeout(test),
            TeResult(test, result, stdout) => {
                st.write_log(&test, &result)?;
                st.write_result(&result)?;
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name;
                        let MetricMap(mm) = mm;
                        for (k, v) in &mm {
                            st.metrics
                              .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
    fn len_if_padded(t: &TestDescAndFn) -> usize {
        match t.testfn.padding() {
            PadNone => 0,
            PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
        let n = t.desc.name.as_slice();
        st.max_name_len = n.len();
    }
    run_tests(opts, tests, |x| callback(&x, &mut st))?;
    return st.write_run_finish();
}

#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_panic: ShouldPanic::No,
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_panic: ShouldPanic::No,
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        quiet: false,
        total: 0,
        passed: 0,
        failed: 0,
        ignored: 0,
        measured: 0,
        max_name_len: 10,
        metrics: MetricMap::new(),
        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[..]),
        Pretty(_) => unreachable!(),
    };

    let apos = s.find("a").unwrap();
    let bpos = s.find("b").unwrap();
    assert!(apos < bpos);
}

fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => !opts.nocapture && stdout_isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[cfg(unix)]
fn stdout_isatty() -> bool {
    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
fn stdout_isatty() -> bool {
    type DWORD = u32;
    type BOOL = i32;
    type HANDLE = *mut u8;
    type LPDWORD = *mut u32;
    const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
    extern "system" {
        fn GetStdHandle(which: DWORD) -> HANDLE;
        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
    }
    unsafe {
        let handle = GetStdHandle(STD_OUTPUT_HANDLE);
        let mut out = 0;
        GetConsoleMode(handle, &mut out) != 0
    }
}

#[derive(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
    TeTimeout(TestDesc),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);


fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
    where F: FnMut(TestEvent) -> io::Result<()>
{
    use std::collections::HashMap;
    use std::sync::mpsc::RecvTimeoutError;

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    let filtered_descs = filtered_tests.iter()
        .map(|t| t.desc.clone())
        .collect();

    callback(TeFiltered(filtered_descs))?;

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false,
            }
        });

    let concurrency = match opts.test_threads {
        Some(n) => n,
        None => get_concurrency(),
    };

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();

    fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
        let now = Instant::now();
        let timed_out = running_tests.iter()
            .filter_map(|(desc, timeout)| {
                if &now >= timeout {
                    Some(desc.clone())
                } else {
                    None
                }
            })
            .collect();
        for test in &timed_out {
            running_tests.remove(test);
        }
        timed_out
    }

    fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
        running_tests.values().min().map(|next_timeout| {
            let now = Instant::now();
            if *next_timeout >= now {
                *next_timeout - now
            } else {
                Duration::new(0, 0)
            }
        })
    }

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
            }
            let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
            running_tests.insert(test.desc.clone(), timeout);
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let mut res;
        loop {
            if let Some(timeout) = calc_timeout(&running_tests) {
                res = rx.recv_timeout(timeout);
                for test in get_timed_out_tests(&mut running_tests) {
                    callback(TeTimeout(test))?;
                }
                if res != Err(RecvTimeoutError::Timeout) {
                    break;
                }
            } else {
                res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                break;
            }
        }

        let (desc, result, stdout) = res.unwrap();
        running_tests.remove(&desc);

        if concurrency != 1 {
            callback(TeWait(desc.clone(), PadNone))?;
        }
        callback(TeResult(desc, result, stdout))?;
        pending -= 1;
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        // (this includes metric fns)
        for b in filtered_benchs_and_metrics {
            callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
            run_test(opts, false, b, tx.clone());
            let (test, result, stdout) = rx.recv().unwrap();
            callback(TeResult(test, result, stdout))?;
        }
    }
    Ok(())
}

#[allow(deprecated)]
fn get_concurrency() -> usize {
    return match env::var("RUST_TEST_THREADS") {
        Ok(s) => {
            let opt_n: Option<usize> = s.parse().ok();
            match opt_n {
                Some(n) if n > 0 => n,
                _ => {
                    panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
                           s)
                }
            }
        }
        Err(..) => num_cpus(),
    };

    #[cfg(windows)]
    #[allow(bad_style)]
    fn num_cpus() -> usize {
        #[repr(C)]
        struct SYSTEM_INFO {
            wProcessorArchitecture: u16,
            wReserved: u16,
            dwPageSize: u32,
            lpMinimumApplicationAddress: *mut u8,
            lpMaximumApplicationAddress: *mut u8,
            dwActiveProcessorMask: *mut u8,
            dwNumberOfProcessors: u32,
            dwProcessorType: u32,
            dwAllocationGranularity: u32,
            wProcessorLevel: u16,
            wProcessorRevision: u16,
        }
        extern "system" {
            fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
        }
        unsafe {
            let mut sysinfo = std::mem::zeroed();
            GetSystemInfo(&mut sysinfo);
            sysinfo.dwNumberOfProcessors as usize
        }
    }

    #[cfg(any(target_os = "linux",
              target_os = "macos",
              target_os = "ios",
              target_os = "android",
              target_os = "solaris",
              target_os = "emscripten",
              target_os = "fuchsia"))]
    fn num_cpus() -> usize {
        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
    }

    #[cfg(any(target_os = "freebsd",
              target_os = "dragonfly",
              target_os = "bitrig",
              target_os = "netbsd"))]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);

        unsafe {
            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
        }
        if cpus < 1 {
            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
            unsafe {
                libc::sysctl(mib.as_mut_ptr(),
                             2,
                             &mut cpus as *mut _ as *mut _,
                             &mut cpus_size as *mut _ as *mut _,
                             ptr::null_mut(),
                             0);
            }
            if cpus < 1 {
                cpus = 1;
            }
        }
        cpus as usize
    }

    #[cfg(target_os = "openbsd")]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];

        unsafe {
            libc::sysctl(mib.as_mut_ptr(),
                         2,
                         &mut cpus as *mut _ as *mut _,
                         &mut cpus_size as *mut _ as *mut _,
                         ptr::null_mut(),
                         0);
        }
        if cpus < 1 {
            cpus = 1;
        }
        cpus as usize
    }

    #[cfg(target_os = "haiku")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }
}

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref filter) => {
            filtered.into_iter()
                .filter(|test| test.desc.name.as_slice().contains(&filter[..]))
                .collect()
        }
    };

    // Skip tests that match any of the skip filters
    filtered = filtered.into_iter()
        .filter(|t| !opts.skip.iter().any(|sf| t.desc.name.as_slice().contains(&sf[..])))
        .collect();

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn { desc, testfn } = test;
                Some(TestDescAndFn {
                    desc: TestDesc { ignore: false, ..desc },
                    testfn: testfn,
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(filter).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    tests.into_iter().map(|x| {
        let testfn = match x.testfn {
            DynBenchFn(bench) => {
                DynTestFn(Box::new(move |()| {
                    bench::run_once(|b| bench.run(b))
                }))
            }
            StaticBenchFn(benchfn) => {
                DynTestFn(Box::new(move |()| {
                    bench::run_once(|b| benchfn(b))
                }))
            }
            f => f,
        };
        TestDescAndFn {
            desc: x.desc,
            testfn: testfn,
        }
    }).collect()
}

pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn { desc, testfn } = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Box<FnBox<()>>) {
        struct Sink(Arc<Mutex<Vec<u8>>>);
        impl Write for Sink {
            fn write(&mut self, data: &[u8]) -> io::Result<usize> {
                Write::write(&mut *self.0.lock().unwrap(), data)
            }
            fn flush(&mut self) -> io::Result<()> {
                Ok(())
            }
        }

        // Buffer for capturing standard I/O
        let data = Arc::new(Mutex::new(Vec::new()));
        let data2 = data.clone();

        let name = desc.name.clone();
        let runtest = move || {
            let oldio = if !nocapture {
                Some((
                    io::set_print(Some(Box::new(Sink(data2.clone())))),
                    io::set_panic(Some(Box::new(Sink(data2))))
                ))
            } else {
                None
            };

            let result = catch_unwind(AssertUnwindSafe(|| {
                testfn.call_box(())
            }));

            if let Some((printio, panicio)) = oldio {
                io::set_print(printio);
                io::set_panic(panicio);
            };

            let test_result = calc_result(&desc, result);
            let stdout = data.lock().unwrap().to_vec();
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        };


        // If the platform is single-threaded we're just going to run
        // the test synchronously, regardless of the concurrency
        // level.
        let supports_threads = !cfg!(target_os = "emscripten");
        if supports_threads {
            let cfg = thread::Builder::new().name(match name {
                DynTestName(ref name) => name.clone(),
                StaticTestName(name) => name.to_owned(),
            });
            cfg.spawn(runtest).unwrap();
        } else {
            runtest();
        }
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.call_box(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Box::new(move |()| f())),
    }
}

fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
    match (&desc.should_panic, task_result) {
        (&ShouldPanic::No, Ok(())) |
        (&ShouldPanic::Yes, Err(_)) => TrOk,
        (&ShouldPanic::YesWithMessage(msg), Err(ref err))
            if err.downcast_ref::<String>()
                  .map(|e| &**e)
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
        _ => TrFailed,
    }
}
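
// Illustrative outcomes: with `ShouldPanic::YesWithMessage("oops")`, a test
// that runs `panic!("oops: {}", 1)` yields `TrOk` (the payload downcasts to
// `String` here, or to `&'static str` for literal messages, and contains
// "oops"), while one that panics with "bang" yields `TrFailed`.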

impl MetricMap {
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
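    ///
    /// A sketch: a time-like metric (smaller is better) uses positive noise.
    ///
    /// ```rust,ignore
    /// let mut mm = MetricMap::new();
    /// mm.insert_metric("alloc-ns", 1520.0, 40.0);
    /// assert_eq!(mm.fmt_metrics(), "alloc-ns: 1520 (+/- 40)");
    /// ```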
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise,
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_owned(), m);
    }

    pub fn fmt_metrics(&self) -> String {
        let MetricMap(ref mm) = *self;
        let v: Vec<String> = mm.iter()
            .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
            .collect();
        v.join(", ")
    }
}


// Benchmarking

/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
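///
/// A sketch of use inside a benchmark (`b` is a `Bencher`); without
/// `black_box` the whole computation could be const-folded away:
///
/// ```rust,ignore
/// b.iter(|| {
///     let n = black_box(1000u64); // opaque input: prevents const-folding
///     (0..n).sum::<u64>()         // result is also kept alive by `iter`
/// });
/// ```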
#[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
              target_arch = "asmjs", target_arch = "wasm32")))]
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe { asm!("" : : "r"(&dummy)) }
    dummy
}
#[cfg(any(all(target_os = "nacl", target_arch = "le32"),
          target_arch = "asmjs", target_arch = "wasm32"))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T {
    dummy
}


impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F)
        where F: FnMut() -> T
    {
        let start = Instant::now();
        let k = self.iterations;
        for _ in 0..k {
            black_box(inner());
        }
        self.dur = start.elapsed();
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n<F>(&mut self, n: u64, f: F)
        where F: FnOnce(&mut Bencher)
    {
        self.iterations = n;
        f(self);
    }

    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
        where F: FnMut(&mut Bencher)
    {
        // Initial bench run to get ballpark figure.
        let mut n = 1;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }
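
        // For example, if the calibration run measured 250 ns/iter, this
        // picks n = 1_000_000 / 250 = 4_000 iterations, i.e. roughly 1ms
        // per sample.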
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 {
            n = 1;
        }

        let mut total_run = Duration::new(0, 0);
        let samples: &mut [f64] = &mut [0.0_f64; 50];
        loop {
            let loop_start = Instant::now();

            for p in &mut *samples {
                self.bench_n(n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ = stats::Summary::new(samples);

            for p in &mut *samples {
                self.bench_n(5 * n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ5 = stats::Summary::new(samples);
            let loop_run = loop_start.elapsed();

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
               summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run > Duration::from_secs(3) {
                return summ5;
            }

            // If we overflow here just return the results so far. We check a
            // multiplier of 10 because we're about to multiply by 2 and the
            // next iteration of the loop will also multiply by 5 (to calculate
            // the summ5 result)
            n = match n.checked_mul(10) {
                Some(_) => n * 2,
                None => return summ5,
            };
        }
    }
}

pub mod bench {
    use std::cmp;
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples
        where F: FnMut(&mut Bencher)
    {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::new(0, 0),
            bytes: 0,
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let mb_s = bs.bytes * 1000 / ns_iter;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as usize,
        }
    }

    pub fn run_once<F>(f: F)
        where F: FnOnce(&mut Bencher)
    {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::new(0, 0),
            bytes: 0,
        };
        bs.bench_n(1, f);
    }
}

#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
               TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn,
               ShouldPanic};
    use std::sync::mpsc::channel;

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() {
            panic!();
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() {}
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_panic() {
        fn f() {
            panic!();
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_good_message() {
        fn f() {
            panic!("an error message");
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::YesWithMessage("error message"),
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_bad_message() {
        fn f() {
            panic!("an error message");
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::YesWithMessage("foobar"),
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn test_should_panic_but_succeeds() {
        fn f() {}
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes,
            },
            testfn: DynTestFn(Box::new(move |()| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
        let opts = match parse_opts(&args) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag"),
        };
        assert!(opts.run_ignored);
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec![TestDescAndFn {
                             desc: TestDesc {
                                 name: StaticTestName("1"),
                                 ignore: true,
                                 should_panic: ShouldPanic::No,
                             },
                             testfn: DynTestFn(Box::new(move |()| {})),
                         },
                         TestDescAndFn {
                             desc: TestDesc {
                                 name: StaticTestName("2"),
                                 ignore: false,
                                 should_panic: ShouldPanic::No,
                             },
                             testfn: DynTestFn(Box::new(move |()| {})),
                         }];
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(), "1");
        assert!(!filtered[0].desc.ignore);
    }

    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names = vec!["sha1::test".to_string(),
                         "isize::test_to_str".to_string(),
                         "isize::test_pow".to_string(),
                         "test::do_not_run_ignored_tests".to_string(),
                         "test::ignored_tests_result_in_ignored".to_string(),
                         "test::first_free_arg_should_be_a_filter".to_string(),
                         "test::parse_ignored_flag".to_string(),
                         "test::filter_for_ignored_option".to_string(),
                         "test::sort_tests".to_string()];
        let tests = {
            fn testfn() {}
            let mut tests = Vec::new();
            for name in &names {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_panic: ShouldPanic::No,
                    },
                    testfn: DynTestFn(Box::new(move |()| testfn())),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected = vec!["isize::test_pow".to_string(),
                            "isize::test_to_str".to_string(),
                            "sha1::test".to_string(),
                            "test::do_not_run_ignored_tests".to_string(),
                            "test::filter_for_ignored_option".to_string(),
                            "test::first_free_arg_should_be_a_filter".to_string(),
                            "test::ignored_tests_result_in_ignored".to_string(),
                            "test::parse_ignored_flag".to_string(),
                            "test::sort_tests".to_string()];

        for (a, b) in expected.iter().zip(filtered) {
            assert!(*a == b.desc.name.to_string());
        }
    }

    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
    }
}
1710 }