src/libtest/lib.rs (rustc 1.6.0)
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Support code for rustc's built-in unit-test and micro-benchmarking
12 //! framework.
13 //!
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
18 //!
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
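//!
//! As an illustrative sketch (not part of the original documentation), the
//! attribute-driven usage looks roughly like this:
//!
//! ```rust,ignore
//! #![feature(test)]
//! extern crate test;
//!
//! #[test]
//! fn it_adds() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_add(b: &mut test::Bencher) {
//!     // `black_box` keeps the optimizer from folding the work away.
//!     b.iter(|| test::black_box(2 + 2));
//! }
//! ```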
20
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
24 // build off of.
25
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test", issue = "27812")]
30 #![cfg_attr(stage0, staged_api)]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
35 html_root_url = "https://doc.rust-lang.org/nightly/",
36 test(attr(deny(warnings))))]
37
38 #![feature(asm)]
39 #![feature(box_syntax)]
40 #![feature(fnbox)]
41 #![feature(libc)]
42 #![feature(rustc_private)]
43 #![feature(set_stdio)]
44 #![feature(staged_api)]
45 #![feature(time2)]
46
47 extern crate getopts;
48 extern crate serialize;
49 extern crate serialize as rustc_serialize;
50 extern crate term;
51 extern crate libc;
52
53 pub use self::TestFn::*;
54 pub use self::ColorConfig::*;
55 pub use self::TestResult::*;
56 pub use self::TestName::*;
57 use self::TestEvent::*;
58 use self::NamePadding::*;
59 use self::OutputLocation::*;
60
61 use stats::Stats;
62 use getopts::{OptGroup, optflag, optopt};
63 use serialize::Encodable;
64 use std::boxed::FnBox;
65 use term::Terminal;
66 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
67
68 use std::any::Any;
69 use std::cmp;
70 use std::collections::BTreeMap;
71 use std::env;
72 use std::fmt;
73 use std::fs::File;
74 use std::io::prelude::*;
75 use std::io;
76 use std::iter::repeat;
77 use std::path::PathBuf;
78 use std::sync::mpsc::{channel, Sender};
79 use std::sync::{Arc, Mutex};
80 use std::thread;
81 use std::time::{Instant, Duration};
82
83 // to be used by rustc to compile tests in libtest
84 pub mod test {
85 pub use {Bencher, TestName, TestResult, TestDesc,
86 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
87 Metric, MetricMap,
88 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
89 run_test, test_main, test_main_static, filter_tests,
90 parse_opts, StaticBenchFn, ShouldPanic};
91 }
92
93 pub mod stats;
94
95 // The name of a test. By convention this follows the rules for Rust
96 // paths; i.e. it should be a series of identifiers separated by double
97 // colons. This way if some test runner wants to arrange the tests
98 // hierarchically it may.
99
100 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
101 pub enum TestName {
102 StaticTestName(&'static str),
103 DynTestName(String)
104 }
105 impl TestName {
106 fn as_slice(&self) -> &str {
107 match *self {
108 StaticTestName(s) => s,
109 DynTestName(ref s) => s
110 }
111 }
112 }
113 impl fmt::Display for TestName {
114 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
115 fmt::Display::fmt(self.as_slice(), f)
116 }
117 }
118
119 #[derive(Clone, Copy)]
120 enum NamePadding {
121 PadNone,
122 PadOnRight,
123 }
124
125 impl TestDesc {
126 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
127 let mut name = String::from(self.name.as_slice());
128 let fill = column_count.saturating_sub(name.len());
129 let pad = repeat(" ").take(fill).collect::<String>();
130 match align {
131 PadNone => name,
132 PadOnRight => {
133 name.push_str(&pad);
134 name
135 }
136 }
137 }
138 }
139
140 /// Represents a benchmark function.
141 pub trait TDynBenchFn: Send {
142 fn run(&self, harness: &mut Bencher);
143 }
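// Illustrative sketch (not part of the original sources): a small adapter
// type, here hypothetically called `ClosureBench`, is enough to implement
// this trait and feed a closure to `DynBenchFn`:
//
//     struct ClosureBench<F: Fn(&mut Bencher) + Send>(F);
//
//     impl<F: Fn(&mut Bencher) + Send> TDynBenchFn for ClosureBench<F> {
//         fn run(&self, harness: &mut Bencher) {
//             (self.0)(harness)
//         }
//     }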
144
145 // A function that runs a test. If the function returns successfully,
146 // the test succeeds; if the function panics then the test fails. We
147 // may need to come up with a more clever definition of test in order
148 // to support isolation of tests into threads.
149 pub enum TestFn {
150 StaticTestFn(fn()),
151 StaticBenchFn(fn(&mut Bencher)),
152 StaticMetricFn(fn(&mut MetricMap)),
153 DynTestFn(Box<FnBox() + Send>),
154 DynMetricFn(Box<FnBox(&mut MetricMap)+Send>),
155 DynBenchFn(Box<TDynBenchFn+'static>)
156 }
157
158 impl TestFn {
159 fn padding(&self) -> NamePadding {
160 match *self {
161 StaticTestFn(..) => PadNone,
162 StaticBenchFn(..) => PadOnRight,
163 StaticMetricFn(..) => PadOnRight,
164 DynTestFn(..) => PadNone,
165 DynMetricFn(..) => PadOnRight,
166 DynBenchFn(..) => PadOnRight,
167 }
168 }
169 }
170
171 impl fmt::Debug for TestFn {
172 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
173 f.write_str(match *self {
174 StaticTestFn(..) => "StaticTestFn(..)",
175 StaticBenchFn(..) => "StaticBenchFn(..)",
176 StaticMetricFn(..) => "StaticMetricFn(..)",
177 DynTestFn(..) => "DynTestFn(..)",
178 DynMetricFn(..) => "DynMetricFn(..)",
179 DynBenchFn(..) => "DynBenchFn(..)"
180 })
181 }
182 }
183
184 /// Manager of the benchmarking runs.
185 ///
186 /// This is fed into functions marked with `#[bench]` to allow for
187 /// set-up & tear-down before running a piece of code repeatedly via a
188 /// call to `iter`.
189 #[derive(Copy, Clone)]
190 pub struct Bencher {
191 iterations: u64,
192 dur: Duration,
193 pub bytes: u64,
194 }
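// Illustrative sketch (not part of the original sources) of how a `#[bench]`
// function typically uses this: set-up runs outside `iter` and is not timed,
// the closure passed to `iter` is what gets measured, and setting `bytes`
// enables MB/s reporting.
//
//     #[bench]
//     fn bench_sum(b: &mut Bencher) {
//         let data: Vec<u64> = (0..1_000).collect();  // set-up, not timed
//         b.bytes = (data.len() * 8) as u64;
//         b.iter(|| data.iter().fold(0u64, |acc, &x| acc + x));
//     }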
195
196 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
197 pub enum ShouldPanic {
198 No,
199 Yes,
200 YesWithMessage(&'static str)
201 }
202
203 // The definition of a single test. A test runner will run a list of
204 // these.
205 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
206 pub struct TestDesc {
207 pub name: TestName,
208 pub ignore: bool,
209 pub should_panic: ShouldPanic,
210 }
211
212 unsafe impl Send for TestDesc {}
213
214 #[derive(Debug)]
215 pub struct TestDescAndFn {
216 pub desc: TestDesc,
217 pub testfn: TestFn,
218 }
219
220 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
221 pub struct Metric {
222 value: f64,
223 noise: f64
224 }
225
226 impl Metric {
227 pub fn new(value: f64, noise: f64) -> Metric {
228 Metric {value: value, noise: noise}
229 }
230 }
231
232 #[derive(PartialEq)]
233 pub struct MetricMap(BTreeMap<String,Metric>);
234
235 impl Clone for MetricMap {
236 fn clone(&self) -> MetricMap {
237 let MetricMap(ref map) = *self;
238 MetricMap(map.clone())
239 }
240 }
241
242 // The default console test runner. It accepts the command line
243 // arguments and a vector of test_descs.
244 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
245 let opts =
246 match parse_opts(args) {
247 Some(Ok(o)) => o,
248 Some(Err(msg)) => panic!("{:?}", msg),
249 None => return
250 };
251 match run_tests_console(&opts, tests) {
252 Ok(true) => {}
253 Ok(false) => std::process::exit(101),
254 Err(e) => panic!("io error when running tests: {:?}", e),
255 }
256 }
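// Illustrative sketch (not part of the original sources): a custom harness
// can build its tests dynamically and hand them to `test_main` together with
// the process arguments. The function name below is made up.
//
//     fn run_custom_harness() {
//         let args: Vec<String> = std::env::args().collect();
//         let tests = vec![TestDescAndFn {
//             desc: TestDesc {
//                 name: DynTestName("dynamic::example".to_string()),
//                 ignore: false,
//                 should_panic: ShouldPanic::No,
//             },
//             testfn: DynTestFn(Box::new(|| assert_eq!(1 + 1, 2))),
//         }];
//         test_main(&args, tests);
//     }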
257
258 // A variant optimized for invocation with a static test vector.
259 // This will panic (intentionally) when fed any dynamic tests, because
260 // it copies the static values out into a dynamic vector and cannot copy
261 // dynamic values. It does this because, from this point on, a
262 // Vec<TestDescAndFn> is used to move ownership into the parallel test
263 // runners, which in turn requires a Vec<> rather than
264 // a &[].
265 pub fn test_main_static(tests: &[TestDescAndFn]) {
266 let args = env::args().collect::<Vec<_>>();
267 let owned_tests = tests.iter().map(|t| {
268 match t.testfn {
269 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
270 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
271 _ => panic!("non-static tests passed to test::test_main_static")
272 }
273 }).collect();
274 test_main(&args, owned_tests)
275 }
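// Illustrative sketch (not part of the original sources) of roughly what the
// compiler-generated harness that calls this entry point looks like: every
// `#[test]` becomes one static entry, and `main` forwards the slice here.
//
//     static TESTS: &'static [TestDescAndFn] = &[
//         // one TestDescAndFn with a StaticTestFn per #[test] function ...
//     ];
//
//     fn main() { test_main_static(TESTS); }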
276
277 #[derive(Copy, Clone)]
278 pub enum ColorConfig {
279 AutoColor,
280 AlwaysColor,
281 NeverColor,
282 }
283
284 pub struct TestOpts {
285 pub filter: Option<String>,
286 pub run_ignored: bool,
287 pub run_tests: bool,
288 pub bench_benchmarks: bool,
289 pub logfile: Option<PathBuf>,
290 pub nocapture: bool,
291 pub color: ColorConfig,
292 }
293
294 impl TestOpts {
295 #[cfg(test)]
296 fn new() -> TestOpts {
297 TestOpts {
298 filter: None,
299 run_ignored: false,
300 run_tests: false,
301 bench_benchmarks: false,
302 logfile: None,
303 nocapture: false,
304 color: AutoColor,
305 }
306 }
307 }
308
309 /// Result of parsing the options.
310 pub type OptRes = Result<TestOpts, String>;
311
312 fn optgroups() -> Vec<getopts::OptGroup> {
313 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
314 getopts::optflag("", "test", "Run tests and not benchmarks"),
315 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
316 getopts::optflag("h", "help", "Display this message (longer with --help)"),
317 getopts::optopt("", "logfile", "Write logs to the specified file instead \
318 of stdout", "PATH"),
319 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
320 thread, allow printing directly"),
321 getopts::optopt("", "color", "Configure coloring of output:
322 auto = colorize if stdout is a tty and tests are run serially (default);
323 always = always colorize output;
324 never = never colorize output;", "auto|always|never"))
325 }
326
327 fn usage(binary: &str) {
328 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
329 println!(r#"{usage}
330
331 The FILTER string is matched against the name of all tests, and only
332 those tests whose names contain the filter are run.
333
334 By default, all tests are run in parallel. This can be altered with the
335 RUST_TEST_THREADS environment variable when running tests (set it to 1).
336
337 All tests have their standard output and standard error captured by default.
338 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
339 environment variable. Logging is not captured by default.
340
341 Test Attributes:
342
343 #[test] - Indicates a function is a test to be run. This function
344 takes no arguments.
345 #[bench] - Indicates a function is a benchmark to be run. This
346 function takes one argument (test::Bencher).
347 #[should_panic] - This function (also labeled with #[test]) will only pass if
348 the code causes a panic (an assertion failure or panic!).
349 A message may be provided, which the failure string must
350 contain: #[should_panic(expected = "foo")].
351 #[ignore] - When applied to a function which is already attributed as a
352 test, the test runner will ignore it during normal test runs.
353 Running with --ignored will run these
354 tests."#,
355 usage = getopts::usage(&message, &optgroups()));
356 }
357
358 // Parses command line arguments into test options
359 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
360 let args_ = &args[1..];
361 let matches =
362 match getopts::getopts(args_, &optgroups()) {
363 Ok(m) => m,
364 Err(f) => return Some(Err(f.to_string()))
365 };
366
367 if matches.opt_present("h") { usage(&args[0]); return None; }
368
369 let filter = if !matches.free.is_empty() {
370 Some(matches.free[0].clone())
371 } else {
372 None
373 };
374
375 let run_ignored = matches.opt_present("ignored");
376
377 let logfile = matches.opt_str("logfile");
378 let logfile = logfile.map(|s| PathBuf::from(&s));
379
380 let bench_benchmarks = matches.opt_present("bench");
381 let run_tests = ! bench_benchmarks ||
382 matches.opt_present("test");
383
384 let mut nocapture = matches.opt_present("nocapture");
385 if !nocapture {
386 nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
387 }
388
389 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
390 Some("auto") | None => AutoColor,
391 Some("always") => AlwaysColor,
392 Some("never") => NeverColor,
393
394 Some(v) => return Some(Err(format!("argument for --color must be \
395 auto, always, or never (was {})",
396 v))),
397 };
398
399 let test_opts = TestOpts {
400 filter: filter,
401 run_ignored: run_ignored,
402 run_tests: run_tests,
403 bench_benchmarks: bench_benchmarks,
404 logfile: logfile,
405 nocapture: nocapture,
406 color: color,
407 };
408
409 Some(Ok(test_opts))
410 }
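// Illustrative sketch (not part of the original sources): the first free
// argument becomes the filter and the flags map onto `TestOpts` fields.
//
//     let args: Vec<String> = vec!["mytests".to_string(),
//                                  "hash".to_string(),
//                                  "--ignored".to_string()];
//     let opts = parse_opts(&args).unwrap().unwrap();
//     assert_eq!(opts.filter, Some("hash".to_string()));
//     assert!(opts.run_ignored);
//     assert!(opts.run_tests && !opts.bench_benchmarks);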
411
412 #[derive(Clone, PartialEq)]
413 pub struct BenchSamples {
414 ns_iter_summ: stats::Summary,
415 mb_s: usize,
416 }
417
418 #[derive(Clone, PartialEq)]
419 pub enum TestResult {
420 TrOk,
421 TrFailed,
422 TrIgnored,
423 TrMetrics(MetricMap),
424 TrBench(BenchSamples),
425 }
426
427 unsafe impl Send for TestResult {}
428
429 enum OutputLocation<T> {
430 Pretty(Box<term::StdoutTerminal>),
431 Raw(T),
432 }
433
434 struct ConsoleTestState<T> {
435 log_out: Option<File>,
436 out: OutputLocation<T>,
437 use_color: bool,
438 total: usize,
439 passed: usize,
440 failed: usize,
441 ignored: usize,
442 measured: usize,
443 metrics: MetricMap,
444 failures: Vec<(TestDesc, Vec<u8> )> ,
445 max_name_len: usize, // number of columns to fill when aligning names
446 }
447
448 impl<T: Write> ConsoleTestState<T> {
449 pub fn new(opts: &TestOpts,
450 _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
451 let log_out = match opts.logfile {
452 Some(ref path) => Some(try!(File::create(path))),
453 None => None
454 };
455 let out = match term::stdout() {
456 None => Raw(io::stdout()),
457 Some(t) => Pretty(t)
458 };
459
460 Ok(ConsoleTestState {
461 out: out,
462 log_out: log_out,
463 use_color: use_color(opts),
464 total: 0,
465 passed: 0,
466 failed: 0,
467 ignored: 0,
468 measured: 0,
469 metrics: MetricMap::new(),
470 failures: Vec::new(),
471 max_name_len: 0,
472 })
473 }
474
475 pub fn write_ok(&mut self) -> io::Result<()> {
476 self.write_pretty("ok", term::color::GREEN)
477 }
478
479 pub fn write_failed(&mut self) -> io::Result<()> {
480 self.write_pretty("FAILED", term::color::RED)
481 }
482
483 pub fn write_ignored(&mut self) -> io::Result<()> {
484 self.write_pretty("ignored", term::color::YELLOW)
485 }
486
487 pub fn write_metric(&mut self) -> io::Result<()> {
488 self.write_pretty("metric", term::color::CYAN)
489 }
490
491 pub fn write_bench(&mut self) -> io::Result<()> {
492 self.write_pretty("bench", term::color::CYAN)
493 }
494
495 pub fn write_pretty(&mut self,
496 word: &str,
497 color: term::color::Color) -> io::Result<()> {
498 match self.out {
499 Pretty(ref mut term) => {
500 if self.use_color {
501 try!(term.fg(color));
502 }
503 try!(term.write_all(word.as_bytes()));
504 if self.use_color {
505 try!(term.reset());
506 }
507 term.flush()
508 }
509 Raw(ref mut stdout) => {
510 try!(stdout.write_all(word.as_bytes()));
511 stdout.flush()
512 }
513 }
514 }
515
516 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
517 match self.out {
518 Pretty(ref mut term) => {
519 try!(term.write_all(s.as_bytes()));
520 term.flush()
521 },
522 Raw(ref mut stdout) => {
523 try!(stdout.write_all(s.as_bytes()));
524 stdout.flush()
525 },
526 }
527 }
528
529 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
530 self.total = len;
531 let noun = if len != 1 { "tests" } else { "test" };
532 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
533 }
534
535 pub fn write_test_start(&mut self, test: &TestDesc,
536 align: NamePadding) -> io::Result<()> {
537 let name = test.padded_name(self.max_name_len, align);
538 self.write_plain(&format!("test {} ... ", name))
539 }
540
541 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
542 try!(match *result {
543 TrOk => self.write_ok(),
544 TrFailed => self.write_failed(),
545 TrIgnored => self.write_ignored(),
546 TrMetrics(ref mm) => {
547 try!(self.write_metric());
548 self.write_plain(&format!(": {}", mm.fmt_metrics()))
549 }
550 TrBench(ref bs) => {
551 try!(self.write_bench());
552
553 try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
554
555 Ok(())
556 }
557 });
558 self.write_plain("\n")
559 }
560
561 pub fn write_log(&mut self, test: &TestDesc,
562 result: &TestResult) -> io::Result<()> {
563 match self.log_out {
564 None => Ok(()),
565 Some(ref mut o) => {
566 let s = format!("{} {}\n", match *result {
567 TrOk => "ok".to_owned(),
568 TrFailed => "failed".to_owned(),
569 TrIgnored => "ignored".to_owned(),
570 TrMetrics(ref mm) => mm.fmt_metrics(),
571 TrBench(ref bs) => fmt_bench_samples(bs)
572 }, test.name);
573 o.write_all(s.as_bytes())
574 }
575 }
576 }
577
578 pub fn write_failures(&mut self) -> io::Result<()> {
579 try!(self.write_plain("\nfailures:\n"));
580 let mut failures = Vec::new();
581 let mut fail_out = String::new();
582 for &(ref f, ref stdout) in &self.failures {
583 failures.push(f.name.to_string());
584 if !stdout.is_empty() {
585 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
586 let output = String::from_utf8_lossy(stdout);
587 fail_out.push_str(&output);
588 fail_out.push_str("\n");
589 }
590 }
591 if !fail_out.is_empty() {
592 try!(self.write_plain("\n"));
593 try!(self.write_plain(&fail_out));
594 }
595
596 try!(self.write_plain("\nfailures:\n"));
597 failures.sort();
598 for name in &failures {
599 try!(self.write_plain(&format!(" {}\n", name)));
600 }
601 Ok(())
602 }
603
604 pub fn write_run_finish(&mut self) -> io::Result<bool> {
605 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
606
607 let success = self.failed == 0;
608 if !success {
609 try!(self.write_failures());
610 }
611
612 try!(self.write_plain("\ntest result: "));
613 if success {
614 // There's no parallelism at this point so it's safe to use color
615 try!(self.write_ok());
616 } else {
617 try!(self.write_failed());
618 }
619 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
620 self.passed, self.failed, self.ignored, self.measured);
621 try!(self.write_plain(&s));
622 return Ok(success);
623 }
624 }
625
626 // Format a number with thousands separators
627 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
628 use std::fmt::Write;
629 let mut output = String::new();
630 let mut trailing = false;
631 for &pow in &[9, 6, 3, 0] {
632 let base = 10_usize.pow(pow);
633 if pow == 0 || trailing || n / base != 0 {
634 if !trailing {
635 output.write_fmt(format_args!("{}", n / base)).unwrap();
636 } else {
637 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
638 }
639 if pow != 0 {
640 output.push(sep);
641 }
642 trailing = true;
643 }
644 n %= base;
645 }
646
647 output
648 }
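// For example (illustrative, not part of the original sources), the loop over
// the powers 10^9, 10^6, 10^3, 10^0 produces:
//
//     assert_eq!(fmt_thousands_sep(1_589_123, ','), "1,589,123");
//     assert_eq!(fmt_thousands_sep(501, ','), "501");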
649
650 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
651 use std::fmt::Write;
652 let mut output = String::new();
653
654 let median = bs.ns_iter_summ.median as usize;
655 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
656
657 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
658 fmt_thousands_sep(median, ','),
659 fmt_thousands_sep(deviation, ','))).unwrap();
660 if bs.mb_s != 0 {
661 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
662 }
663 output
664 }
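// For example (illustrative, not part of the original sources), a summary
// with a median of 5200 ns and a max-min spread of 300 ns renders as
// "      5,200 ns/iter (+/- 300)", with " = {mb_s} MB/s" appended only when
// `mb_s` is nonzero.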
665
666 // A simple console test runner
667 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
668
669 fn callback<T: Write>(event: &TestEvent,
670 st: &mut ConsoleTestState<T>) -> io::Result<()> {
671 match (*event).clone() {
672 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
673 TeWait(ref test, padding) => st.write_test_start(test, padding),
674 TeResult(test, result, stdout) => {
675 try!(st.write_log(&test, &result));
676 try!(st.write_result(&result));
677 match result {
678 TrOk => st.passed += 1,
679 TrIgnored => st.ignored += 1,
680 TrMetrics(mm) => {
681 let tname = test.name;
682 let MetricMap(mm) = mm;
683 for (k,v) in &mm {
684 st.metrics
685 .insert_metric(&format!("{}.{}",
686 tname,
687 k),
688 v.value,
689 v.noise);
690 }
691 st.measured += 1
692 }
693 TrBench(bs) => {
694 st.metrics.insert_metric(test.name.as_slice(),
695 bs.ns_iter_summ.median,
696 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
697 st.measured += 1
698 }
699 TrFailed => {
700 st.failed += 1;
701 st.failures.push((test, stdout));
702 }
703 }
704 Ok(())
705 }
706 }
707 }
708
709 let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
710 fn len_if_padded(t: &TestDescAndFn) -> usize {
711 match t.testfn.padding() {
712 PadNone => 0,
713 PadOnRight => t.desc.name.as_slice().len(),
714 }
715 }
716 match tests.iter().max_by_key(|t|len_if_padded(*t)) {
717 Some(t) => {
718 let n = t.desc.name.as_slice();
719 st.max_name_len = n.len();
720 },
721 None => {}
722 }
723 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
724 return st.write_run_finish();
725 }
726
727 #[test]
728 fn should_sort_failures_before_printing_them() {
729 let test_a = TestDesc {
730 name: StaticTestName("a"),
731 ignore: false,
732 should_panic: ShouldPanic::No
733 };
734
735 let test_b = TestDesc {
736 name: StaticTestName("b"),
737 ignore: false,
738 should_panic: ShouldPanic::No
739 };
740
741 let mut st = ConsoleTestState {
742 log_out: None,
743 out: Raw(Vec::new()),
744 use_color: false,
745 total: 0,
746 passed: 0,
747 failed: 0,
748 ignored: 0,
749 measured: 0,
750 max_name_len: 10,
751 metrics: MetricMap::new(),
752 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
753 };
754
755 st.write_failures().unwrap();
756 let s = match st.out {
757 Raw(ref m) => String::from_utf8_lossy(&m[..]),
758 Pretty(_) => unreachable!()
759 };
760
761 let apos = s.find("a").unwrap();
762 let bpos = s.find("b").unwrap();
763 assert!(apos < bpos);
764 }
765
766 fn use_color(opts: &TestOpts) -> bool {
767 match opts.color {
768 AutoColor => !opts.nocapture && stdout_isatty(),
769 AlwaysColor => true,
770 NeverColor => false,
771 }
772 }
773
774 #[cfg(unix)]
775 fn stdout_isatty() -> bool {
776 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
777 }
778 #[cfg(windows)]
779 fn stdout_isatty() -> bool {
780 type DWORD = u32;
781 type BOOL = i32;
782 type HANDLE = *mut u8;
783 type LPDWORD = *mut u32;
784 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
785 extern "system" {
786 fn GetStdHandle(which: DWORD) -> HANDLE;
787 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
788 }
789 unsafe {
790 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
791 let mut out = 0;
792 GetConsoleMode(handle, &mut out) != 0
793 }
794 }
795
796 #[derive(Clone)]
797 enum TestEvent {
798 TeFiltered(Vec<TestDesc> ),
799 TeWait(TestDesc, NamePadding),
800 TeResult(TestDesc, TestResult, Vec<u8> ),
801 }
802
803 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
804
805
806 fn run_tests<F>(opts: &TestOpts,
807 tests: Vec<TestDescAndFn> ,
808 mut callback: F) -> io::Result<()> where
809 F: FnMut(TestEvent) -> io::Result<()>,
810 {
811 let mut filtered_tests = filter_tests(opts, tests);
812 if !opts.bench_benchmarks {
813 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
814 }
815
816 let filtered_descs = filtered_tests.iter()
817 .map(|t| t.desc.clone())
818 .collect();
819
820 try!(callback(TeFiltered(filtered_descs)));
821
822 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
823 filtered_tests.into_iter().partition(|e| {
824 match e.testfn {
825 StaticTestFn(_) | DynTestFn(_) => true,
826 _ => false
827 }
828 });
829
830 // It's tempting to just spawn all the tests at once, but since we have
831 // many tests that run in other processes we would be making a big mess.
832 let concurrency = get_concurrency();
833
834 let mut remaining = filtered_tests;
835 remaining.reverse();
836 let mut pending = 0;
837
838 let (tx, rx) = channel::<MonitorMsg>();
839
840 while pending > 0 || !remaining.is_empty() {
841 while pending < concurrency && !remaining.is_empty() {
842 let test = remaining.pop().unwrap();
843 if concurrency == 1 {
844 // We are doing one test at a time so we can print the name
845 // of the test before we run it. Useful for debugging tests
846 // that hang forever.
847 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
848 }
849 run_test(opts, !opts.run_tests, test, tx.clone());
850 pending += 1;
851 }
852
853 let (desc, result, stdout) = rx.recv().unwrap();
854 if concurrency != 1 {
855 try!(callback(TeWait(desc.clone(), PadNone)));
856 }
857 try!(callback(TeResult(desc, result, stdout)));
858 pending -= 1;
859 }
860
861 if opts.bench_benchmarks {
862 // All benchmarks run at the end, in serial.
863 // (this includes metric fns)
864 for b in filtered_benchs_and_metrics {
865 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
866 run_test(opts, false, b, tx.clone());
867 let (test, result, stdout) = rx.recv().unwrap();
868 try!(callback(TeResult(test, result, stdout)));
869 }
870 }
871 Ok(())
872 }
873
874 #[allow(deprecated)]
875 fn get_concurrency() -> usize {
876 return match env::var("RUST_TEST_THREADS") {
877 Ok(s) => {
878 let opt_n: Option<usize> = s.parse().ok();
879 match opt_n {
880 Some(n) if n > 0 => n,
881 _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
882 }
883 }
884 Err(..) => num_cpus(),
885 };
886
887 #[cfg(windows)]
888 #[allow(bad_style)]
889 fn num_cpus() -> usize {
890 #[repr(C)]
891 struct SYSTEM_INFO {
892 wProcessorArchitecture: u16,
893 wReserved: u16,
894 dwPageSize: u32,
895 lpMinimumApplicationAddress: *mut u8,
896 lpMaximumApplicationAddress: *mut u8,
897 dwActiveProcessorMask: *mut u8,
898 dwNumberOfProcessors: u32,
899 dwProcessorType: u32,
900 dwAllocationGranularity: u32,
901 wProcessorLevel: u16,
902 wProcessorRevision: u16,
903 }
904 extern "system" {
905 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
906 }
907 unsafe {
908 let mut sysinfo = std::mem::zeroed();
909 GetSystemInfo(&mut sysinfo);
910 sysinfo.dwNumberOfProcessors as usize
911 }
912 }
913
914 #[cfg(unix)]
915 fn num_cpus() -> usize {
916 extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
917 unsafe { rust_get_num_cpus() as usize }
918 }
919 }
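// Illustrative sketch (not part of the original sources): the thread count
// can be pinned via the environment before the harness starts, e.g. from a
// wrapper binary.
//
//     std::env::set_var("RUST_TEST_THREADS", "1");
//     assert_eq!(get_concurrency(), 1);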
920
921 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
922 let mut filtered = tests;
923
924 // Remove tests that don't match the test filter
925 filtered = match opts.filter {
926 None => filtered,
927 Some(ref filter) => {
928 filtered.into_iter().filter(|test| {
929 test.desc.name.as_slice().contains(&filter[..])
930 }).collect()
931 }
932 };
933
934 // Maybe pull out the ignored tests and unignore them
935 filtered = if !opts.run_ignored {
936 filtered
937 } else {
938 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
939 if test.desc.ignore {
940 let TestDescAndFn {desc, testfn} = test;
941 Some(TestDescAndFn {
942 desc: TestDesc {ignore: false, ..desc},
943 testfn: testfn
944 })
945 } else {
946 None
947 }
948 }
949 filtered.into_iter().filter_map(filter).collect()
950 };
951
952 // Sort the tests alphabetically
953 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
954
955 filtered
956 }
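// Illustrative sketch (not part of the original sources): running the ignored
// subset of tests whose names contain "net". `all_tests` stands in for
// whatever test vector the caller already has.
//
//     let mut opts = TestOpts::new();     // the #[cfg(test)] constructor
//     opts.filter = Some("net".to_string());
//     opts.run_ignored = true;
//     let runnable = filter_tests(&opts, all_tests);
//     // `runnable` now holds only the matching, formerly-ignored tests,
//     // un-ignored and sorted by name.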
957
958 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
959 // convert benchmarks to tests, if we're not benchmarking them
960 tests.into_iter().map(|x| {
961 let testfn = match x.testfn {
962 DynBenchFn(bench) => {
963 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
964 }
965 StaticBenchFn(benchfn) => {
966 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
967 }
968 f => f
969 };
970 TestDescAndFn { desc: x.desc, testfn: testfn }
971 }).collect()
972 }
973
974 pub fn run_test(opts: &TestOpts,
975 force_ignore: bool,
976 test: TestDescAndFn,
977 monitor_ch: Sender<MonitorMsg>) {
978
979 let TestDescAndFn {desc, testfn} = test;
980
981 if force_ignore || desc.ignore {
982 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
983 return;
984 }
985
986 fn run_test_inner(desc: TestDesc,
987 monitor_ch: Sender<MonitorMsg>,
988 nocapture: bool,
989 testfn: Box<FnBox() + Send>) {
990 struct Sink(Arc<Mutex<Vec<u8>>>);
991 impl Write for Sink {
992 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
993 Write::write(&mut *self.0.lock().unwrap(), data)
994 }
995 fn flush(&mut self) -> io::Result<()> { Ok(()) }
996 }
997
998 thread::spawn(move || {
999 let data = Arc::new(Mutex::new(Vec::new()));
1000 let data2 = data.clone();
1001 let cfg = thread::Builder::new().name(match desc.name {
1002 DynTestName(ref name) => name.clone(),
1003 StaticTestName(name) => name.to_owned(),
1004 });
1005
1006 let result_guard = cfg.spawn(move || {
1007 if !nocapture {
1008 io::set_print(box Sink(data2.clone()));
1009 io::set_panic(box Sink(data2));
1010 }
1011 testfn()
1012 }).unwrap();
1013 let test_result = calc_result(&desc, result_guard.join());
1014 let stdout = data.lock().unwrap().to_vec();
1015 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1016 });
1017 }
1018
1019 match testfn {
1020 DynBenchFn(bencher) => {
1021 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1022 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1023 return;
1024 }
1025 StaticBenchFn(benchfn) => {
1026 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1027 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1028 return;
1029 }
1030 DynMetricFn(f) => {
1031 let mut mm = MetricMap::new();
1032 f.call_box((&mut mm,));
1033 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1034 return;
1035 }
1036 StaticMetricFn(f) => {
1037 let mut mm = MetricMap::new();
1038 f(&mut mm);
1039 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1040 return;
1041 }
1042 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1043 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1044 Box::new(f))
1045 }
1046 }
1047
1048 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
1049 match (&desc.should_panic, task_result) {
1050 (&ShouldPanic::No, Ok(())) |
1051 (&ShouldPanic::Yes, Err(_)) => TrOk,
1052 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1053 if err.downcast_ref::<String>()
1054 .map(|e| &**e)
1055 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1056 .map(|e| e.contains(msg))
1057 .unwrap_or(false) => TrOk,
1058 _ => TrFailed,
1059 }
1060 }
1061
1062 impl MetricMap {
1063
1064 pub fn new() -> MetricMap {
1065 MetricMap(BTreeMap::new())
1066 }
1067
1068 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1069 /// must be non-negative. The `noise` indicates the uncertainty of the
1070 /// metric, and doubles as the "noise range" of acceptable
1071 /// pairwise regressions on this named value when comparing one
1072 /// run's metrics against the next.
1073 ///
1074 /// If `noise` is positive, then it means this metric is of a value
1075 /// you want to see grow smaller, so a change larger than `noise` in the
1076 /// positive direction represents a regression.
1077 ///
1078 /// If `noise` is negative, then it means this metric is of a value
1079 /// you want to see grow larger, so a change larger than `noise` in the
1080 /// negative direction represents a regression.
1081 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1082 let m = Metric {
1083 value: value,
1084 noise: noise
1085 };
1086 let MetricMap(ref mut map) = *self;
1087 map.insert(name.to_owned(), m);
1088 }
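// Illustrative sketch (not part of the original sources) of the noise
// convention described above: positive noise for "smaller is better"
// metrics, negative noise for "larger is better" metrics.
//
//     let mut mm = MetricMap::new();
//     mm.insert_metric("compile-time-ns", 1_200_000.0, 50_000.0); // lower is better
//     mm.insert_metric("throughput-mb-s", 340.0, -5.0);           // higher is better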
1089
1090 pub fn fmt_metrics(&self) -> String {
1091 let MetricMap(ref mm) = *self;
1092 let v : Vec<String> = mm.iter()
1093 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1094 v.value, v.noise))
1095 .collect();
1096 v.join(", ")
1097 }
1098 }
1099
1100
1101 // Benchmarking
1102
1103 /// An identity function that is opaque to the optimizer, allowing
1104 /// benchmarks to pretend to use their outputs and thereby avoid
1105 /// dead-code elimination.
1106 ///
1107 /// This function is a no-op, and does not even read from `dummy`.
1108 #[cfg(not(all(target_os = "nacl", target_arch = "le32")))]
1109 pub fn black_box<T>(dummy: T) -> T {
1110 // we need to "use" the argument in some way LLVM can't
1111 // introspect.
1112 unsafe {asm!("" : : "r"(&dummy))}
1113 dummy
1114 }
1115 #[cfg(all(target_os = "nacl", target_arch = "le32"))]
1116 #[inline(never)]
1117 pub fn black_box<T>(dummy: T) -> T { dummy }
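// Illustrative sketch (not part of the original sources): routing both the
// inputs and the result of the measured expression through `black_box` keeps
// LLVM from constant-folding the whole benchmark body away.
//
//     #[bench]
//     fn bench_pow(b: &mut Bencher) {
//         b.iter(|| {
//             let n = black_box(2u64);
//             black_box(n.pow(10))
//         });
//     }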
1118
1119
1120 impl Bencher {
1121 /// Callback for benchmark functions to run in their body.
1122 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1123 let start = Instant::now();
1124 let k = self.iterations;
1125 for _ in 0..k {
1126 black_box(inner());
1127 }
1128 self.dur = start.elapsed();
1129 }
1130
1131 pub fn ns_elapsed(&mut self) -> u64 {
1132 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
1133 }
1134
1135 pub fn ns_per_iter(&mut self) -> u64 {
1136 if self.iterations == 0 {
1137 0
1138 } else {
1139 self.ns_elapsed() / cmp::max(self.iterations, 1)
1140 }
1141 }
1142
1143 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1144 self.iterations = n;
1145 f(self);
1146 }
1147
1148 // This is a more statistics-driven benchmark algorithm
1149 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) {
1150 // Initial bench run to get ballpark figure.
1151 let mut n = 1;
1152 self.bench_n(n, |x| f(x));
1153
1154 // Try to estimate the iteration count needed for a 1ms run, falling
1155 // back to 1m iterations if the first run took < 1ns per iteration.
1156 if self.ns_per_iter() == 0 {
1157 n = 1_000_000;
1158 } else {
1159 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1160 }
1161 // if the first run took more than 1ms we don't want to just
1162 // be left doing 0 iterations on every loop. The unfortunate
1163 // side effect of not being able to do as many runs is
1164 // automatically handled by the statistical analysis below
1165 // (i.e. larger error bars).
1166 if n == 0 { n = 1; }
1167
1168 let mut total_run = Duration::new(0, 0);
1169 let samples : &mut [f64] = &mut [0.0_f64; 50];
1170 loop {
1171 let loop_start = Instant::now();
1172
1173 for p in &mut *samples {
1174 self.bench_n(n, |x| f(x));
1175 *p = self.ns_per_iter() as f64;
1176 };
1177
1178 stats::winsorize(samples, 5.0);
1179 let summ = stats::Summary::new(samples);
1180
1181 for p in &mut *samples {
1182 self.bench_n(5 * n, |x| f(x));
1183 *p = self.ns_per_iter() as f64;
1184 };
1185
1186 stats::winsorize(samples, 5.0);
1187 let summ5 = stats::Summary::new(samples);
1188 let loop_run = loop_start.elapsed();
1189
1190 // If we've run for 100ms and seem to have converged to a
1191 // stable median.
1192 if loop_run > Duration::from_millis(100) &&
1193 summ.median_abs_dev_pct < 1.0 &&
1194 summ.median - summ5.median < summ5.median_abs_dev {
1195 return summ5;
1196 }
1197
1198 total_run = total_run + loop_run;
1199 // Longest we ever run for is 3s.
1200 if total_run > Duration::from_secs(3) {
1201 return summ5;
1202 }
1203
1204 // If we overflow here just return the results so far. We check a
1205 // multiplier of 10 because we're about to multiply by 2 and the
1206 // next iteration of the loop will also multiply by 5 (to calculate
1207 // the summ5 result)
1208 n = match n.checked_mul(10) {
1209 Some(_) => n * 2,
1210 None => return summ5,
1211 };
1212 }
1213 }
1214 }
1215
1216 pub mod bench {
1217 use std::cmp;
1218 use std::time::Duration;
1219 use super::{Bencher, BenchSamples};
1220
1221 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1222 let mut bs = Bencher {
1223 iterations: 0,
1224 dur: Duration::new(0, 0),
1225 bytes: 0
1226 };
1227
1228 let ns_iter_summ = bs.auto_bench(f);
1229
1230 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1231 let iter_s = 1_000_000_000 / ns_iter;
1232 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1233
1234 BenchSamples {
1235 ns_iter_summ: ns_iter_summ,
1236 mb_s: mb_s as usize
1237 }
1238 }
1239
1240 pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
1241 let mut bs = Bencher {
1242 iterations: 0,
1243 dur: Duration::new(0, 0),
1244 bytes: 0
1245 };
1246 bs.bench_n(1, f);
1247 }
1248 }
1249
1250 #[cfg(test)]
1251 mod tests {
1252 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1253 TestDesc, TestDescAndFn, TestOpts, run_test,
1254 MetricMap,
1255 StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1256 use std::sync::mpsc::channel;
1257
1258 #[test]
1259 pub fn do_not_run_ignored_tests() {
1260 fn f() { panic!(); }
1261 let desc = TestDescAndFn {
1262 desc: TestDesc {
1263 name: StaticTestName("whatever"),
1264 ignore: true,
1265 should_panic: ShouldPanic::No,
1266 },
1267 testfn: DynTestFn(Box::new(move|| f())),
1268 };
1269 let (tx, rx) = channel();
1270 run_test(&TestOpts::new(), false, desc, tx);
1271 let (_, res, _) = rx.recv().unwrap();
1272 assert!(res != TrOk);
1273 }
1274
1275 #[test]
1276 pub fn ignored_tests_result_in_ignored() {
1277 fn f() { }
1278 let desc = TestDescAndFn {
1279 desc: TestDesc {
1280 name: StaticTestName("whatever"),
1281 ignore: true,
1282 should_panic: ShouldPanic::No,
1283 },
1284 testfn: DynTestFn(Box::new(move|| f())),
1285 };
1286 let (tx, rx) = channel();
1287 run_test(&TestOpts::new(), false, desc, tx);
1288 let (_, res, _) = rx.recv().unwrap();
1289 assert!(res == TrIgnored);
1290 }
1291
1292 #[test]
1293 fn test_should_panic() {
1294 fn f() { panic!(); }
1295 let desc = TestDescAndFn {
1296 desc: TestDesc {
1297 name: StaticTestName("whatever"),
1298 ignore: false,
1299 should_panic: ShouldPanic::Yes,
1300 },
1301 testfn: DynTestFn(Box::new(move|| f())),
1302 };
1303 let (tx, rx) = channel();
1304 run_test(&TestOpts::new(), false, desc, tx);
1305 let (_, res, _) = rx.recv().unwrap();
1306 assert!(res == TrOk);
1307 }
1308
1309 #[test]
1310 fn test_should_panic_good_message() {
1311 fn f() { panic!("an error message"); }
1312 let desc = TestDescAndFn {
1313 desc: TestDesc {
1314 name: StaticTestName("whatever"),
1315 ignore: false,
1316 should_panic: ShouldPanic::YesWithMessage("error message"),
1317 },
1318 testfn: DynTestFn(Box::new(move|| f())),
1319 };
1320 let (tx, rx) = channel();
1321 run_test(&TestOpts::new(), false, desc, tx);
1322 let (_, res, _) = rx.recv().unwrap();
1323 assert!(res == TrOk);
1324 }
1325
1326 #[test]
1327 fn test_should_panic_bad_message() {
1328 fn f() { panic!("an error message"); }
1329 let desc = TestDescAndFn {
1330 desc: TestDesc {
1331 name: StaticTestName("whatever"),
1332 ignore: false,
1333 should_panic: ShouldPanic::YesWithMessage("foobar"),
1334 },
1335 testfn: DynTestFn(Box::new(move|| f())),
1336 };
1337 let (tx, rx) = channel();
1338 run_test(&TestOpts::new(), false, desc, tx);
1339 let (_, res, _) = rx.recv().unwrap();
1340 assert!(res == TrFailed);
1341 }
1342
1343 #[test]
1344 fn test_should_panic_but_succeeds() {
1345 fn f() { }
1346 let desc = TestDescAndFn {
1347 desc: TestDesc {
1348 name: StaticTestName("whatever"),
1349 ignore: false,
1350 should_panic: ShouldPanic::Yes,
1351 },
1352 testfn: DynTestFn(Box::new(move|| f())),
1353 };
1354 let (tx, rx) = channel();
1355 run_test(&TestOpts::new(), false, desc, tx);
1356 let (_, res, _) = rx.recv().unwrap();
1357 assert!(res == TrFailed);
1358 }
1359
1360 #[test]
1361 fn parse_ignored_flag() {
1362 let args = vec!("progname".to_string(),
1363 "filter".to_string(),
1364 "--ignored".to_string());
1365 let opts = match parse_opts(&args) {
1366 Some(Ok(o)) => o,
1367 _ => panic!("Malformed arg in parse_ignored_flag")
1368 };
1369 assert!((opts.run_ignored));
1370 }
1371
1372 #[test]
1373 pub fn filter_for_ignored_option() {
1374 // When we run ignored tests the test filter should filter out all the
1375 // unignored tests and flip the ignore flag on the rest to false
1376
1377 let mut opts = TestOpts::new();
1378 opts.run_tests = true;
1379 opts.run_ignored = true;
1380
1381 let tests = vec!(
1382 TestDescAndFn {
1383 desc: TestDesc {
1384 name: StaticTestName("1"),
1385 ignore: true,
1386 should_panic: ShouldPanic::No,
1387 },
1388 testfn: DynTestFn(Box::new(move|| {})),
1389 },
1390 TestDescAndFn {
1391 desc: TestDesc {
1392 name: StaticTestName("2"),
1393 ignore: false,
1394 should_panic: ShouldPanic::No,
1395 },
1396 testfn: DynTestFn(Box::new(move|| {})),
1397 });
1398 let filtered = filter_tests(&opts, tests);
1399
1400 assert_eq!(filtered.len(), 1);
1401 assert_eq!(filtered[0].desc.name.to_string(),
1402 "1");
1403 assert!(filtered[0].desc.ignore == false);
1404 }
1405
1406 #[test]
1407 pub fn sort_tests() {
1408 let mut opts = TestOpts::new();
1409 opts.run_tests = true;
1410
1411 let names =
1412 vec!("sha1::test".to_string(),
1413 "isize::test_to_str".to_string(),
1414 "isize::test_pow".to_string(),
1415 "test::do_not_run_ignored_tests".to_string(),
1416 "test::ignored_tests_result_in_ignored".to_string(),
1417 "test::first_free_arg_should_be_a_filter".to_string(),
1418 "test::parse_ignored_flag".to_string(),
1419 "test::filter_for_ignored_option".to_string(),
1420 "test::sort_tests".to_string());
1421 let tests =
1422 {
1423 fn testfn() { }
1424 let mut tests = Vec::new();
1425 for name in &names {
1426 let test = TestDescAndFn {
1427 desc: TestDesc {
1428 name: DynTestName((*name).clone()),
1429 ignore: false,
1430 should_panic: ShouldPanic::No,
1431 },
1432 testfn: DynTestFn(Box::new(testfn)),
1433 };
1434 tests.push(test);
1435 }
1436 tests
1437 };
1438 let filtered = filter_tests(&opts, tests);
1439
1440 let expected =
1441 vec!("isize::test_pow".to_string(),
1442 "isize::test_to_str".to_string(),
1443 "sha1::test".to_string(),
1444 "test::do_not_run_ignored_tests".to_string(),
1445 "test::filter_for_ignored_option".to_string(),
1446 "test::first_free_arg_should_be_a_filter".to_string(),
1447 "test::ignored_tests_result_in_ignored".to_string(),
1448 "test::parse_ignored_flag".to_string(),
1449 "test::sort_tests".to_string());
1450
1451 for (a, b) in expected.iter().zip(filtered) {
1452 assert!(*a == b.desc.name.to_string());
1453 }
1454 }
1455
1456 #[test]
1457 pub fn test_metricmap_compare() {
1458 let mut m1 = MetricMap::new();
1459 let mut m2 = MetricMap::new();
1460 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1461 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1462
1463 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1464 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1465
1466 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1467 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1468
1469 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1470 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1471
1472 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1473 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1474
1475 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1476 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
1477 }
1478 }