// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/testing.html) of the book for more details.
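//!
//! A minimal usage sketch (hypothetical test and benchmark names; the
//! attributes are expanded by rustc itself, this crate only runs the
//! resulting functions):
//!
//! ```rust
//! extern crate test;
//!
//! use test::Bencher;
//!
//! #[test]
//! fn it_adds() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_add(b: &mut Bencher) {
//!     // `black_box` keeps the optimizer from deleting the work.
//!     b.iter(|| test::black_box(2 + 2));
//! }
//! ```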

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "test"]
#![unstable(feature = "test")]
#![staged_api]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]

#![feature(asm)]
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
#![feature(rustc_private)]
#![feature(staged_api)]
#![feature(std_misc)]
#![feature(libc)]
#![feature(set_stdio)]
#![feature(duration)]
#![feature(duration_span)]

extern crate getopts;
extern crate serialize;
extern crate serialize as rustc_serialize;
extern crate term;
extern crate libc;

pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;

use stats::Stats;
use getopts::{OptGroup, optflag, optopt};
use serialize::Encodable;
use std::boxed::FnBox;
use term::Terminal;
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::env;
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::io;
use std::iter::repeat;
use std::path::PathBuf;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::thunk::Thunk;
use std::time::Duration;

// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn, ShouldPanic};
}

pub mod stats;

// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}
impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s
        }
    }
}
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}

#[derive(Clone, Copy)]
enum NamePadding {
    PadNone,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnRight => {
                name.push_str(&pad);
                name
            }
        }
    }
}

/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
    fn run(&self, harness: &mut Bencher);
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Thunk<'static>),
    DynMetricFn(Box<FnBox(&mut MetricMap) + Send>),
    DynBenchFn(Box<TDynBenchFn + 'static>)
}

impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        })
    }
}

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
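///
/// A sketch of the intended shape (hypothetical benchmark; only the
/// closure passed to `iter` is timed, the set-up is not):
///
/// ```rust
/// # extern crate test;
/// # use test::Bencher;
/// #[bench]
/// fn bench_sort(b: &mut Bencher) {
///     let data: Vec<u64> = (0..1000).rev().collect(); // set-up
///     b.iter(|| {
///         let mut v = data.clone();
///         v.sort();
///         v
///     });
/// }
/// ```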
#[derive(Copy, Clone)]
pub struct Bencher {
    iterations: u64,
    dur: Duration,
    pub bytes: u64,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
    No,
    Yes(Option<&'static str>)
}

// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_panic: ShouldPanic,
}

unsafe impl Send for TestDesc {}

#[derive(Debug)]
pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric { value: value, noise: noise }
    }
}

#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    let opts = match parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => panic!("{:?}", msg),
        None => return
    };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),
    }
}
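
// A sketch of driving the runner by hand with a hand-built test list
// (hypothetical test; normally the #[test] attribute generates this):
//
//     fn smoke() { assert_eq!(1 + 1, 2); }
//
//     fn main() {
//         let args: Vec<String> = std::env::args().collect();
//         let tests = vec![TestDescAndFn {
//             desc: TestDesc {
//                 name: StaticTestName("manual::smoke"),
//                 ignore: false,
//                 should_panic: ShouldPanic::No,
//             },
//             testfn: StaticTestFn(smoke),
//         }];
//         test_main(&args, tests);
//     }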

// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(args: env::Args, tests: &[TestDescAndFn]) {
    let args = args.collect::<Vec<_>>();
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => panic!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(&args, owned_tests)
}
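
// Typically this is invoked from the entry point that rustc synthesizes
// for --test builds, roughly (illustrative sketch, not the exact
// generated code):
//
//     static TESTS: &'static [TestDescAndFn] = &[ /* generated */ ];
//
//     fn main() {
//         test_main_static(std::env::args(), TESTS);
//     }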

#[derive(Copy, Clone)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

pub struct TestOpts {
    pub filter: Option<String>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub bench_benchmarks: bool,
    pub logfile: Option<PathBuf>,
    pub nocapture: bool,
    pub color: ColorConfig,
}

impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            bench_benchmarks: false,
            logfile: None,
            nocapture: false,
            color: AutoColor,
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                          of stdout", "PATH"),
         getopts::optflag("", "nocapture", "Don't capture stdout/stderr of each \
                          task, allow printing directly"),
         getopts::optopt("", "color", "Configure coloring of output:
             auto   = colorize if stdout is a tty and tests are run serially (default);
             always = always colorize output;
             never  = never colorize output;", "auto|always|never"))
}

fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER string is matched against the name of all tests, and only
those tests whose names contain the filter are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_THREADS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]         - Indicates a function is a test to be run. This function
                      takes no arguments.
    #[bench]        - Indicates a function is a benchmark to be run. This
                      function takes one argument (test::Bencher).
    #[should_panic] - This function (also labeled with #[test]) will only pass if
                      the code causes a panic (an assertion failure or panic!).
                      A message may be provided, which the failure string must
                      contain: #[should_panic(expected = "foo")].
    #[ignore]       - When applied to a function which is already attributed as a
                      test, then the test runner will ignore these tests during
                      normal test runs. Running with --ignored will run these
                      tests."#,
             usage = getopts::usage(&message, &optgroups()));
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches = match getopts::getopts(args_, &optgroups()) {
        Ok(m) => m,
        Err(f) => return Some(Err(f.to_string()))
    };

    if matches.opt_present("h") { usage(&args[0]); return None; }

    let filter = if !matches.free.is_empty() {
        Some(matches.free[0].clone())
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| PathBuf::from(&s));

    let bench_benchmarks = matches.opt_present("bench");
    let run_tests = !bench_benchmarks || matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})", v))),
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        bench_benchmarks: bench_benchmarks,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
    };

    Some(Ok(test_opts))
}
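
// A sketch of the argv shapes parse_opts accepts (hypothetical values):
//
//     let args = vec!["mytests".to_string(),   // argv[0], skipped by tail()
//                     "substring".to_string(), // free argument => filter
//                     "--bench".to_string(),
//                     "--color".to_string(), "never".to_string()];
//     match parse_opts(&args) {
//         Some(Ok(opts)) => assert!(opts.bench_benchmarks),
//         Some(Err(msg)) => panic!("{}", msg),
//         None => {} // -h / --help was given
//     }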

#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary,
    mb_s: usize,
}

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}

enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    total: usize,
    passed: usize,
    failed: usize,
    ignored: usize,
    measured: usize,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: usize, // number of columns to fill when aligning names
}

impl<T: Write> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(io::stdout()),
            Some(t) => Pretty(t)
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            total: 0,
            passed: 0,
            failed: 0,
            ignored: 0,
            measured: 0,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0,
        })
    }

    pub fn write_ok(&mut self) -> io::Result<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::Result<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::Result<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::Result<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::Result<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::Result<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write_all(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                term.flush()
            }
            Raw(ref mut stdout) => {
                try!(stdout.write_all(word.as_bytes()));
                stdout.flush()
            }
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
        match self.out {
            Pretty(ref mut term) => {
                try!(term.write_all(s.as_bytes()));
                term.flush()
            },
            Raw(ref mut stdout) => {
                try!(stdout.write_all(s.as_bytes()));
                stdout.flush()
            },
        }
    }

    pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(&format!("\nrunning {} {}\n", len, noun))
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::Result<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(&format!("test {} ... ", name))
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(&format!(": {}", mm.fmt_metrics()))
            }
            TrBench(ref bs) => {
                try!(self.write_bench());
                try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
                Ok(())
            }
        });
        self.write_plain("\n")
    }

    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::Result<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                    TrOk => "ok".to_string(),
                    TrFailed => "failed".to_string(),
                    TrIgnored => "ignored".to_string(),
                    TrMetrics(ref mm) => mm.fmt_metrics(),
                    TrBench(ref bs) => fmt_bench_samples(bs)
                }, test.name);
                o.write_all(s.as_bytes())
            }
        }
    }

    pub fn write_failures(&mut self) -> io::Result<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in &self.failures {
            failures.push(f.name.to_string());
            if !stdout.is_empty() {
                fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
                let output = String::from_utf8_lossy(stdout);
                fail_out.push_str(&output);
                fail_out.push_str("\n");
            }
        }
        if !fail_out.is_empty() {
            try!(self.write_plain("\n"));
            try!(self.write_plain(&fail_out));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.sort();
        for name in &failures {
            try!(self.write_plain(&format!("    {}\n", name)));
        }
        Ok(())
    }

    pub fn write_run_finish(&mut self) -> io::Result<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let success = self.failed == 0;
        if !success {
            try!(self.write_failures());
        }

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(&s));
        return Ok(success);
    }
}

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as usize,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as usize,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize)
    }
}
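
// E.g. a summary whose median is 5200 ns with min 5000 and max 5600, and
// mb_s == 0, renders as "     5200 ns/iter (+/- 600)" (illustrative values;
// the width-9 right-alignment comes from the `{:>9}` format spec).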

// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {

    fn callback<T: Write>(event: &TestEvent,
                          st: &mut ConsoleTestState<T>) -> io::Result<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name;
                        let MetricMap(mm) = mm;
                        for (k, v) in &mm {
                            st.metrics
                              .insert_metric(&format!("{}.{}", tname, k),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
    fn len_if_padded(t: &TestDescAndFn) -> usize {
        match t.testfn.padding() {
            PadNone => 0,
            PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t| len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        },
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    return st.write_run_finish();
}
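
// The console output produced looks roughly like this for a fully passing
// run (illustrative):
//
//     running 2 tests
//     test bar ... ok
//     test foo ... ok
//
//     test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured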

#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_panic: ShouldPanic::No
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_panic: ShouldPanic::No
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        total: 0,
        passed: 0,
        failed: 0,
        ignored: 0,
        measured: 0,
        max_name_len: 10,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[..]),
        Pretty(_) => unreachable!()
    };

    let apos = s.find("a").unwrap();
    let bpos = s.find("b").unwrap();
    assert!(apos < bpos);
}

fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && stdout_isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[cfg(unix)]
fn stdout_isatty() -> bool {
    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
fn stdout_isatty() -> bool {
    const STD_OUTPUT_HANDLE: libc::DWORD = -11i32 as libc::DWORD;
    extern "system" {
        fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
        fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
                          lpMode: libc::LPDWORD) -> libc::BOOL;
    }
    unsafe {
        let handle = GetStdHandle(STD_OUTPUT_HANDLE);
        let mut out = 0;
        GetConsoleMode(handle, &mut out) != 0
    }
}

#[derive(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);


fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn>,
                mut callback: F) -> io::Result<()> where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        // (this includes metric fns)
        for b in filtered_benchs_and_metrics {
            try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
            run_test(opts, false, b, tx.clone());
            let (test, result, stdout) = rx.recv().unwrap();
            try!(callback(TeResult(test, result, stdout)));
        }
    }
    Ok(())
}

#[allow(deprecated)]
fn get_concurrency() -> usize {
    match env::var("RUST_TEST_THREADS") {
        Ok(s) => {
            let opt_n: Option<usize> = s.parse().ok();
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
            }
        }
        Err(..) => {
            if std::rt::util::limit_thread_creation_due_to_osx_and_valgrind() {
                1
            } else {
                extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
                unsafe { rust_get_num_cpus() as usize }
            }
        }
    }
}
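
// E.g. `RUST_TEST_THREADS=1 ./mytests` forces the tests to run serially
// (which, with `--color auto`, is also what permits colored output on a tty).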

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref filter) => {
            filtered.into_iter().filter(|test| {
                test.desc.name.as_slice().contains(&filter[..])
            }).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn { desc, testfn } = test;
                Some(TestDescAndFn {
                    desc: TestDesc { ignore: false, ..desc },
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    tests.into_iter().map(|x| {
        let testfn = match x.testfn {
            DynBenchFn(bench) => {
                DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
            }
            StaticBenchFn(benchfn) => {
                DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
            }
            f => f
        };
        TestDescAndFn { desc: x.desc, testfn: testfn }
    }).collect()
}

pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn { desc, testfn } = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Thunk<'static>) {
        // Shared buffer that collects the test's captured stdout/panic output.
        struct Sink(Arc<Mutex<Vec<u8>>>);
        impl Write for Sink {
            fn write(&mut self, data: &[u8]) -> io::Result<usize> {
                Write::write(&mut *self.0.lock().unwrap(), data)
            }
            fn flush(&mut self) -> io::Result<()> { Ok(()) }
        }

        thread::spawn(move || {
            let data = Arc::new(Mutex::new(Vec::new()));
            let data2 = data.clone();
            let cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });

            let result_guard = cfg.spawn(move || {
                if !nocapture {
                    io::set_print(box Sink(data2.clone()));
                    io::set_panic(box Sink(data2));
                }
                testfn()
            }).unwrap();
            let test_result = calc_result(&desc, result_guard.join());
            let stdout = data.lock().unwrap().to_vec();
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        });
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.call_box((&mut mm,));
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Box::new(move || f()))
    }
}

fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
    match (&desc.should_panic, task_result) {
        (&ShouldPanic::No, Ok(())) |
        (&ShouldPanic::Yes(None), Err(_)) => TrOk,
        (&ShouldPanic::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .map(|e| &**e)
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
        _ => TrFailed,
    }
}
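
// In table form (illustrative restatement of the match arms above):
//
//     should_panic     test outcome                          result
//     ---------------  ------------------------------------  --------
//     No               Ok(())                                TrOk
//     Yes(None)        Err(_)                                TrOk
//     Yes(Some(msg))   Err(String/&str payload containing    TrOk
//                      msg)
//     anything else                                          TrFailed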

impl MetricMap {
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }

    pub fn fmt_metrics(&self) -> String {
        let MetricMap(ref mm) = *self;
        let v: Vec<String> = mm.iter()
                               .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
                               .collect();
        v.connect(", ")
    }
}
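
// Sketch of recording and formatting metrics (hypothetical names/values):
//
//     let mut mm = MetricMap::new();
//     mm.insert_metric("allocations", 128.0, 4.0);   // want this to shrink
//     mm.insert_metric("throughput", 900.0, -25.0);  // negative noise: want growth
//     println!("{}", mm.fmt_metrics());
//     // => allocations: 128 (+/- 4), throughput: 900 (+/- -25)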

// Benchmarking

/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe { asm!("" : : "r"(&dummy)) }
    dummy
}
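
// Typical use inside a benchmark body (illustrative; `compute` and `input`
// are hypothetical):
//
//     b.iter(|| black_box(compute(black_box(&input))));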

impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in 0..k {
                black_box(inner());
            }
        });
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.secs() * 1_000_000_000 + (self.dur.extra_nanos() as u64)
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
        f(self);
    }

    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        let mut n = 1;
        self.bench_n(n, |x| f(x));

        // Try to estimate the iteration count needed for 1ms, falling back
        // to 1 million iterations if the first run took less than 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }
        // If the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = Duration::new(0, 0);
        let samples: &mut [f64] = &mut [0.0_f64; 50];
        loop {
            let mut summ = None;
            let mut summ5 = None;

            let loop_run = Duration::span(|| {
                for p in &mut *samples {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));

                for p in &mut *samples {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));
            });
            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();

            // If we've run for 100ms and seem to have converged to a
            // stable median, we're done.
            if loop_run > Duration::from_millis(100) &&
               summ.median_abs_dev_pct < 1.0 &&
               summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run > Duration::from_secs(3) {
                return summ5;
            }

            // If we overflow here just return the results so far. We check a
            // multiplier of 10 because we're about to multiply by 2 and the
            // next iteration of the loop will also multiply by 5 (to calculate
            // the summ5 result).
            n = match n.checked_mul(10) {
                Some(_) => n * 2,
                None => return summ5,
            };
        }
    }
}

pub mod bench {
    use std::cmp;
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::new(0, 0),
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as usize
        }
    }

    pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::new(0, 0),
            bytes: 0
        };
        bs.bench_n(1, f);
    }
}

#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               MetricMap,
               StaticTestName, DynTestName, DynTestFn, ShouldPanic};
    use std::thunk::Thunk;
    use std::sync::mpsc::channel;

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
            },
            testfn: DynTestFn(Box::new(move || f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
            },
            testfn: DynTestFn(Box::new(move || f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_panic() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes(None)
            },
            testfn: DynTestFn(Box::new(move || f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_good_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes(Some("error message"))
            },
            testfn: DynTestFn(Box::new(move || f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_panic_bad_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes(Some("foobar"))
            },
            testfn: DynTestFn(Box::new(move || f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn test_should_panic_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_panic: ShouldPanic::Yes(None)
            },
            testfn: DynTestFn(Box::new(move || f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(&args) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag")
        };
        assert!(opts.run_ignored);
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_panic: ShouldPanic::No,
                },
                testfn: DynTestFn(Box::new(move || {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_panic: ShouldPanic::No,
                },
                testfn: DynTestFn(Box::new(move || {})),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(), "1");
        assert!(filtered[0].desc.ignore == false);
    }

    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "isize::test_to_str".to_string(),
                 "isize::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests = {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in &names {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_panic: ShouldPanic::No,
                    },
                    testfn: DynTestFn(Box::new(testfn)),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("isize::test_pow".to_string(),
                 "isize::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_string());
        }
    }

    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
    }
}