1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url
= "https://doc.rust-lang.org/favicon.ico",
32 html_root_url
= "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings
))))]
34 #![cfg_attr(not(stage0), deny(warnings))]
38 #![feature(rustc_private)]
39 #![feature(set_stdio)]
40 #![feature(staged_api)]
41 #![cfg_attr(stage0, feature(question_mark))]
42 #![feature(panic_unwind)]
47 extern crate panic_unwind
;
49 pub use self::TestFn
::*;
50 pub use self::ColorConfig
::*;
51 pub use self::TestResult
::*;
52 pub use self::TestName
::*;
53 use self::TestEvent
::*;
54 use self::NamePadding
::*;
55 use self::OutputLocation
::*;
57 use std
::panic
::{catch_unwind, AssertUnwindSafe}
;
60 use std
::collections
::BTreeMap
;
64 use std
::io
::prelude
::*;
66 use std
::iter
::repeat
;
67 use std
::path
::PathBuf
;
68 use std
::sync
::mpsc
::{channel, Sender}
;
69 use std
::sync
::{Arc, Mutex}
;
71 use std
::time
::{Instant, Duration}
;
73 const TEST_WARN_TIMEOUT_S
: u64 = 60;
75 // to be used by rustc to compile tests in libtest
77 pub use {Bencher
, TestName
, TestResult
, TestDesc
, TestDescAndFn
, TestOpts
, TrFailed
,
78 TrIgnored
, TrOk
, Metric
, MetricMap
, StaticTestFn
, StaticTestName
, DynTestName
,
79 DynTestFn
, run_test
, test_main
, test_main_static
, filter_tests
, parse_opts
,
80 StaticBenchFn
, ShouldPanic
};
85 // The name of a test. By convention this follows the rules for rust
86 // paths; i.e. it should be a series of identifiers separated by double
87 // colons. This way if some test runner wants to arrange the tests
88 // hierarchically it may.
90 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
92 StaticTestName(&'
static str),
96 fn as_slice(&self) -> &str {
98 StaticTestName(s
) => s
,
99 DynTestName(ref s
) => s
,
103 impl fmt
::Display
for TestName
{
104 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
105 fmt
::Display
::fmt(self.as_slice(), f
)
109 #[derive(Clone, Copy, PartialEq, Eq)]
116 fn padded_name(&self, column_count
: usize, align
: NamePadding
) -> String
{
117 let mut name
= String
::from(self.name
.as_slice());
118 let fill
= column_count
.saturating_sub(name
.len());
119 let pad
= repeat(" ").take(fill
).collect
::<String
>();
130 /// Represents a benchmark function.
131 pub trait TDynBenchFn
: Send
{
132 fn run(&self, harness
: &mut Bencher
);
135 pub trait FnBox
<T
>: Send
+ '
static {
136 fn call_box(self: Box
<Self>, t
: T
);
139 impl<T
, F
: FnOnce(T
) + Send
+ '
static> FnBox
<T
> for F
{
140 fn call_box(self: Box
<F
>, t
: T
) {
145 // A function that runs a test. If the function returns successfully,
146 // the test succeeds; if the function panics then the test fails. We
147 // may need to come up with a more clever definition of test in order
148 // to support isolation of tests into threads.
151 StaticBenchFn(fn(&mut Bencher
)),
152 StaticMetricFn(fn(&mut MetricMap
)),
153 DynTestFn(Box
<FnBox
<()>>),
154 DynMetricFn(Box
<for<'a
> FnBox
<&'a
mut MetricMap
>>),
155 DynBenchFn(Box
<TDynBenchFn
+ '
static>),
159 fn padding(&self) -> NamePadding
{
161 StaticTestFn(..) => PadNone
,
162 StaticBenchFn(..) => PadOnRight
,
163 StaticMetricFn(..) => PadOnRight
,
164 DynTestFn(..) => PadNone
,
165 DynMetricFn(..) => PadOnRight
,
166 DynBenchFn(..) => PadOnRight
,
171 impl fmt
::Debug
for TestFn
{
172 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
173 f
.write_str(match *self {
174 StaticTestFn(..) => "StaticTestFn(..)",
175 StaticBenchFn(..) => "StaticBenchFn(..)",
176 StaticMetricFn(..) => "StaticMetricFn(..)",
177 DynTestFn(..) => "DynTestFn(..)",
178 DynMetricFn(..) => "DynMetricFn(..)",
179 DynBenchFn(..) => "DynBenchFn(..)",
184 /// Manager of the benchmarking runs.
186 /// This is fed into functions marked with `#[bench]` to allow for
187 /// set-up & tear-down before running a piece of code repeatedly via a
189 #[derive(Copy, Clone)]
196 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
197 pub enum ShouldPanic
{
200 YesWithMessage(&'
static str),
203 // The definition of a single test. A test runner will run a list of
205 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
206 pub struct TestDesc
{
209 pub should_panic
: ShouldPanic
,
213 pub struct TestPaths
{
214 pub file
: PathBuf
, // e.g., compile-test/foo/bar/baz.rs
215 pub base
: PathBuf
, // e.g., compile-test, auxiliary
216 pub relative_dir
: PathBuf
, // e.g., foo/bar
220 pub struct TestDescAndFn
{
225 #[derive(Clone, PartialEq, Debug, Copy)]
232 pub fn new(value
: f64, noise
: f64) -> Metric
{
241 pub struct MetricMap(BTreeMap
<String
, Metric
>);
243 impl Clone
for MetricMap
{
244 fn clone(&self) -> MetricMap
{
245 let MetricMap(ref map
) = *self;
246 MetricMap(map
.clone())
250 // The default console test runner. It accepts the command line
251 // arguments and a vector of test_descs.
252 pub fn test_main(args
: &[String
], tests
: Vec
<TestDescAndFn
>) {
253 let opts
= match parse_opts(args
) {
255 Some(Err(msg
)) => panic
!("{:?}", msg
),
258 match run_tests_console(&opts
, tests
) {
260 Ok(false) => std
::process
::exit(101),
261 Err(e
) => panic
!("io error when running tests: {:?}", e
),
265 // A variant optimized for invocation with a static test vector.
266 // This will panic (intentionally) when fed any dynamic tests, because
267 // it is copying the static values out into a dynamic vector and cannot
268 // copy dynamic values. It is doing this because from this point on
269 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
270 // semantics into parallel test runners, which in turn requires a Vec<>
271 // rather than a &[].
272 pub fn test_main_static(tests
: &[TestDescAndFn
]) {
273 let args
= env
::args().collect
::<Vec
<_
>>();
274 let owned_tests
= tests
.iter()
279 testfn
: StaticTestFn(f
),
280 desc
: t
.desc
.clone(),
283 StaticBenchFn(f
) => {
285 testfn
: StaticBenchFn(f
),
286 desc
: t
.desc
.clone(),
289 _
=> panic
!("non-static tests passed to test::test_main_static"),
293 test_main(&args
, owned_tests
)
296 #[derive(Copy, Clone)]
297 pub enum ColorConfig
{
303 pub struct TestOpts
{
304 pub filter
: Option
<String
>,
305 pub run_ignored
: bool
,
307 pub bench_benchmarks
: bool
,
308 pub logfile
: Option
<PathBuf
>,
310 pub color
: ColorConfig
,
312 pub test_threads
: Option
<usize>,
313 pub skip
: Vec
<String
>,
318 fn new() -> TestOpts
{
323 bench_benchmarks
: false,
334 /// Result of parsing the options.
335 pub type OptRes
= Result
<TestOpts
, String
>;
337 #[cfg_attr(rustfmt, rustfmt_skip)]
338 fn optgroups() -> Vec
<getopts
::OptGroup
> {
339 vec
![getopts
::optflag("", "ignored", "Run ignored tests"),
340 getopts
::optflag("", "test", "Run tests and not benchmarks"),
341 getopts
::optflag("", "bench", "Run benchmarks instead of tests"),
342 getopts
::optflag("h", "help", "Display this message (longer with --help)"),
343 getopts
::optopt("", "logfile", "Write logs to the specified file instead \
345 getopts
::optflag("", "nocapture", "don't capture stdout/stderr of each \
346 task, allow printing directly"),
347 getopts
::optopt("", "test-threads", "Number of threads used for running tests \
348 in parallel", "n_threads"),
349 getopts
::optmulti("", "skip", "Skip tests whose names contain FILTER (this flag can \
350 be used multiple times)","FILTER"),
351 getopts
::optflag("q", "quiet", "Display one character per test instead of one line"),
352 getopts
::optopt("", "color", "Configure coloring of output:
353 auto = colorize if stdout is a tty and tests are run on serially (default);
354 always = always colorize output;
355 never = never colorize output;", "auto|always|never")]
358 fn usage(binary
: &str) {
359 let message
= format
!("Usage: {} [OPTIONS] [FILTER]", binary
);
362 The FILTER string is tested against the name of all tests, and only those
363 tests whose names contain the filter are run.
365 By default, all tests are run in parallel. This can be altered with the
366 --test-threads flag or the RUST_TEST_THREADS environment variable when running
369 All tests have their standard output and standard error captured by default.
370 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
371 environment variable to a value other than "0". Logging is not captured by default.
375 #[test] - Indicates a function is a test to be run. This function
377 #[bench] - Indicates a function is a benchmark to be run. This
378 function takes one argument (test::Bencher).
379 #[should_panic] - This function (also labeled with #[test]) will only pass if
380 the code causes a panic (an assertion failure or panic!)
381 A message may be provided, which the failure string must
382 contain: #[should_panic(expected = "foo")].
383 #[ignore] - When applied to a function which is already attributed as a
384 test, then the test runner will ignore these tests during
385 normal test runs. Running with --ignored will run these
387 usage
= getopts
::usage(&message
, &optgroups()));
390 // Parses command line arguments into test options
391 pub fn parse_opts(args
: &[String
]) -> Option
<OptRes
> {
392 let args_
= &args
[1..];
393 let matches
= match getopts
::getopts(args_
, &optgroups()) {
395 Err(f
) => return Some(Err(f
.to_string())),
398 if matches
.opt_present("h") {
403 let filter
= if !matches
.free
.is_empty() {
404 Some(matches
.free
[0].clone())
409 let run_ignored
= matches
.opt_present("ignored");
410 let quiet
= matches
.opt_present("quiet");
412 let logfile
= matches
.opt_str("logfile");
413 let logfile
= logfile
.map(|s
| PathBuf
::from(&s
));
415 let bench_benchmarks
= matches
.opt_present("bench");
416 let run_tests
= !bench_benchmarks
|| matches
.opt_present("test");
418 let mut nocapture
= matches
.opt_present("nocapture");
420 nocapture
= match env
::var("RUST_TEST_NOCAPTURE") {
421 Ok(val
) => &val
!= "0",
426 let test_threads
= match matches
.opt_str("test-threads") {
428 match n_str
.parse
::<usize>() {
431 return Some(Err(format
!("argument for --test-threads must be a number > 0 \
438 let color
= match matches
.opt_str("color").as_ref().map(|s
| &**s
) {
439 Some("auto") | None
=> AutoColor
,
440 Some("always") => AlwaysColor
,
441 Some("never") => NeverColor
,
444 return Some(Err(format
!("argument for --color must be auto, always, or never (was \
450 let test_opts
= TestOpts
{
452 run_ignored
: run_ignored
,
453 run_tests
: run_tests
,
454 bench_benchmarks
: bench_benchmarks
,
456 nocapture
: nocapture
,
459 test_threads
: test_threads
,
460 skip
: matches
.opt_strs("skip"),
466 #[derive(Clone, PartialEq)]
467 pub struct BenchSamples
{
468 ns_iter_summ
: stats
::Summary
,
472 #[derive(Clone, PartialEq)]
473 pub enum TestResult
{
477 TrMetrics(MetricMap
),
478 TrBench(BenchSamples
),
481 unsafe impl Send
for TestResult {}
483 enum OutputLocation
<T
> {
484 Pretty(Box
<term
::StdoutTerminal
>),
488 struct ConsoleTestState
<T
> {
489 log_out
: Option
<File
>,
490 out
: OutputLocation
<T
>,
499 failures
: Vec
<(TestDesc
, Vec
<u8>)>,
500 max_name_len
: usize, // number of columns to fill when aligning names
503 impl<T
: Write
> ConsoleTestState
<T
> {
504 pub fn new(opts
: &TestOpts
, _
: Option
<T
>) -> io
::Result
<ConsoleTestState
<io
::Stdout
>> {
505 let log_out
= match opts
.logfile
{
506 Some(ref path
) => Some(File
::create(path
)?
),
509 let out
= match term
::stdout() {
510 None
=> Raw(io
::stdout()),
511 Some(t
) => Pretty(t
),
514 Ok(ConsoleTestState
{
517 use_color
: use_color(opts
),
524 metrics
: MetricMap
::new(),
525 failures
: Vec
::new(),
530 pub fn write_ok(&mut self) -> io
::Result
<()> {
531 self.write_short_result("ok", ".", term
::color
::GREEN
)
534 pub fn write_failed(&mut self) -> io
::Result
<()> {
535 self.write_short_result("FAILED", "F", term
::color
::RED
)
538 pub fn write_ignored(&mut self) -> io
::Result
<()> {
539 self.write_short_result("ignored", "i", term
::color
::YELLOW
)
542 pub fn write_metric(&mut self) -> io
::Result
<()> {
543 self.write_pretty("metric", term
::color
::CYAN
)
546 pub fn write_bench(&mut self) -> io
::Result
<()> {
547 self.write_pretty("bench", term
::color
::CYAN
)
550 pub fn write_short_result(&mut self, verbose
: &str, quiet
: &str, color
: term
::color
::Color
)
553 self.write_pretty(quiet
, color
)
555 self.write_pretty(verbose
, color
)?
;
556 self.write_plain("\n")
560 pub fn write_pretty(&mut self, word
: &str, color
: term
::color
::Color
) -> io
::Result
<()> {
562 Pretty(ref mut term
) => {
566 term
.write_all(word
.as_bytes())?
;
572 Raw(ref mut stdout
) => {
573 stdout
.write_all(word
.as_bytes())?
;
579 pub fn write_plain(&mut self, s
: &str) -> io
::Result
<()> {
581 Pretty(ref mut term
) => {
582 term
.write_all(s
.as_bytes())?
;
585 Raw(ref mut stdout
) => {
586 stdout
.write_all(s
.as_bytes())?
;
592 pub fn write_run_start(&mut self, len
: usize) -> io
::Result
<()> {
594 let noun
= if len
!= 1 {
599 self.write_plain(&format
!("\nrunning {} {}\n", len
, noun
))
602 pub fn write_test_start(&mut self, test
: &TestDesc
, align
: NamePadding
) -> io
::Result
<()> {
603 if self.quiet
&& align
!= PadOnRight
{
606 let name
= test
.padded_name(self.max_name_len
, align
);
607 self.write_plain(&format
!("test {} ... ", name
))
611 pub fn write_result(&mut self, result
: &TestResult
) -> io
::Result
<()> {
613 TrOk
=> self.write_ok(),
614 TrFailed
=> self.write_failed(),
615 TrIgnored
=> self.write_ignored(),
616 TrMetrics(ref mm
) => {
617 self.write_metric()?
;
618 self.write_plain(&format
!(": {}\n", mm
.fmt_metrics()))
622 self.write_plain(&format
!(": {}\n", fmt_bench_samples(bs
)))
627 pub fn write_timeout(&mut self, desc
: &TestDesc
) -> io
::Result
<()> {
628 self.write_plain(&format
!("test {} has been running for over {} seconds\n",
630 TEST_WARN_TIMEOUT_S
))
633 pub fn write_log(&mut self, test
: &TestDesc
, result
: &TestResult
) -> io
::Result
<()> {
637 let s
= format
!("{} {}\n",
639 TrOk
=> "ok".to_owned(),
640 TrFailed
=> "failed".to_owned(),
641 TrIgnored
=> "ignored".to_owned(),
642 TrMetrics(ref mm
) => mm
.fmt_metrics(),
643 TrBench(ref bs
) => fmt_bench_samples(bs
),
646 o
.write_all(s
.as_bytes())
651 pub fn write_failures(&mut self) -> io
::Result
<()> {
652 self.write_plain("\nfailures:\n")?
;
653 let mut failures
= Vec
::new();
654 let mut fail_out
= String
::new();
655 for &(ref f
, ref stdout
) in &self.failures
{
656 failures
.push(f
.name
.to_string());
657 if !stdout
.is_empty() {
658 fail_out
.push_str(&format
!("---- {} stdout ----\n\t", f
.name
));
659 let output
= String
::from_utf8_lossy(stdout
);
660 fail_out
.push_str(&output
);
661 fail_out
.push_str("\n");
664 if !fail_out
.is_empty() {
665 self.write_plain("\n")?
;
666 self.write_plain(&fail_out
)?
;
669 self.write_plain("\nfailures:\n")?
;
671 for name
in &failures
{
672 self.write_plain(&format
!(" {}\n", name
))?
;
677 pub fn write_run_finish(&mut self) -> io
::Result
<bool
> {
678 assert
!(self.passed
+ self.failed
+ self.ignored
+ self.measured
== self.total
);
680 let success
= self.failed
== 0;
682 self.write_failures()?
;
685 self.write_plain("\ntest result: ")?
;
687 // There's no parallelism at this point so it's safe to use color
688 self.write_pretty("ok", term
::color
::GREEN
)?
;
690 self.write_pretty("FAILED", term
::color
::RED
)?
;
692 let s
= format
!(". {} passed; {} failed; {} ignored; {} measured\n\n",
697 self.write_plain(&s
)?
;
702 // Format a number with thousands separators
703 fn fmt_thousands_sep(mut n
: usize, sep
: char) -> String
{
705 let mut output
= String
::new();
706 let mut trailing
= false;
707 for &pow
in &[9, 6, 3, 0] {
708 let base
= 10_usize
.pow(pow
);
709 if pow
== 0 || trailing
|| n
/ base
!= 0 {
711 output
.write_fmt(format_args
!("{}", n
/ base
)).unwrap();
713 output
.write_fmt(format_args
!("{:03}", n
/ base
)).unwrap();
726 pub fn fmt_bench_samples(bs
: &BenchSamples
) -> String
{
728 let mut output
= String
::new();
730 let median
= bs
.ns_iter_summ
.median
as usize;
731 let deviation
= (bs
.ns_iter_summ
.max
- bs
.ns_iter_summ
.min
) as usize;
733 output
.write_fmt(format_args
!("{:>11} ns/iter (+/- {})",
734 fmt_thousands_sep(median
, '
,'
),
735 fmt_thousands_sep(deviation
, '
,'
)))
738 output
.write_fmt(format_args
!(" = {} MB/s", bs
.mb_s
)).unwrap();
743 // A simple console test runner
744 pub fn run_tests_console(opts
: &TestOpts
, tests
: Vec
<TestDescAndFn
>) -> io
::Result
<bool
> {
746 fn callback
<T
: Write
>(event
: &TestEvent
, st
: &mut ConsoleTestState
<T
>) -> io
::Result
<()> {
747 match (*event
).clone() {
748 TeFiltered(ref filtered_tests
) => st
.write_run_start(filtered_tests
.len()),
749 TeWait(ref test
, padding
) => st
.write_test_start(test
, padding
),
750 TeTimeout(ref test
) => st
.write_timeout(test
),
751 TeResult(test
, result
, stdout
) => {
752 st
.write_log(&test
, &result
)?
;
753 st
.write_result(&result
)?
;
755 TrOk
=> st
.passed
+= 1,
756 TrIgnored
=> st
.ignored
+= 1,
758 let tname
= test
.name
;
759 let MetricMap(mm
) = mm
;
762 .insert_metric(&format
!("{}.{}", tname
, k
), v
.value
, v
.noise
);
767 st
.metrics
.insert_metric(test
.name
.as_slice(),
768 bs
.ns_iter_summ
.median
,
769 bs
.ns_iter_summ
.max
- bs
.ns_iter_summ
.min
);
774 st
.failures
.push((test
, stdout
));
782 let mut st
= ConsoleTestState
::new(opts
, None
::<io
::Stdout
>)?
;
783 fn len_if_padded(t
: &TestDescAndFn
) -> usize {
784 match t
.testfn
.padding() {
786 PadOnRight
=> t
.desc
.name
.as_slice().len(),
789 if let Some(t
) = tests
.iter().max_by_key(|t
| len_if_padded(*t
)) {
790 let n
= t
.desc
.name
.as_slice();
791 st
.max_name_len
= n
.len();
793 run_tests(opts
, tests
, |x
| callback(&x
, &mut st
))?
;
794 return st
.write_run_finish();
798 fn should_sort_failures_before_printing_them() {
799 let test_a
= TestDesc
{
800 name
: StaticTestName("a"),
802 should_panic
: ShouldPanic
::No
,
805 let test_b
= TestDesc
{
806 name
: StaticTestName("b"),
808 should_panic
: ShouldPanic
::No
,
811 let mut st
= ConsoleTestState
{
813 out
: Raw(Vec
::new()),
822 metrics
: MetricMap
::new(),
823 failures
: vec
![(test_b
, Vec
::new()), (test_a
, Vec
::new())],
826 st
.write_failures().unwrap();
827 let s
= match st
.out
{
828 Raw(ref m
) => String
::from_utf8_lossy(&m
[..]),
829 Pretty(_
) => unreachable
!(),
832 let apos
= s
.find("a").unwrap();
833 let bpos
= s
.find("b").unwrap();
834 assert
!(apos
< bpos
);
837 fn use_color(opts
: &TestOpts
) -> bool
{
839 AutoColor
=> !opts
.nocapture
&& stdout_isatty(),
846 fn stdout_isatty() -> bool
{
847 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
850 fn stdout_isatty() -> bool
{
853 type HANDLE
= *mut u8;
854 type LPDWORD
= *mut u32;
855 const STD_OUTPUT_HANDLE
: DWORD
= -11i32 as DWORD
;
857 fn GetStdHandle(which
: DWORD
) -> HANDLE
;
858 fn GetConsoleMode(hConsoleHandle
: HANDLE
, lpMode
: LPDWORD
) -> BOOL
;
861 let handle
= GetStdHandle(STD_OUTPUT_HANDLE
);
863 GetConsoleMode(handle
, &mut out
) != 0
869 TeFiltered(Vec
<TestDesc
>),
870 TeWait(TestDesc
, NamePadding
),
871 TeResult(TestDesc
, TestResult
, Vec
<u8>),
875 pub type MonitorMsg
= (TestDesc
, TestResult
, Vec
<u8>);
878 fn run_tests
<F
>(opts
: &TestOpts
, tests
: Vec
<TestDescAndFn
>, mut callback
: F
) -> io
::Result
<()>
879 where F
: FnMut(TestEvent
) -> io
::Result
<()>
881 use std
::collections
::HashMap
;
882 use std
::sync
::mpsc
::RecvTimeoutError
;
884 let mut filtered_tests
= filter_tests(opts
, tests
);
885 if !opts
.bench_benchmarks
{
886 filtered_tests
= convert_benchmarks_to_tests(filtered_tests
);
889 let filtered_descs
= filtered_tests
.iter()
890 .map(|t
| t
.desc
.clone())
893 callback(TeFiltered(filtered_descs
))?
;
895 let (filtered_tests
, filtered_benchs_and_metrics
): (Vec
<_
>, _
) =
896 filtered_tests
.into_iter().partition(|e
| {
898 StaticTestFn(_
) | DynTestFn(_
) => true,
903 let concurrency
= match opts
.test_threads
{
905 None
=> get_concurrency(),
908 let mut remaining
= filtered_tests
;
912 let (tx
, rx
) = channel
::<MonitorMsg
>();
914 let mut running_tests
: HashMap
<TestDesc
, Instant
> = HashMap
::new();
916 fn get_timed_out_tests(running_tests
: &mut HashMap
<TestDesc
, Instant
>) -> Vec
<TestDesc
> {
917 let now
= Instant
::now();
918 let timed_out
= running_tests
.iter()
919 .filter_map(|(desc
, timeout
)| if &now
>= timeout { Some(desc.clone())}
else { None }
)
921 for test
in &timed_out
{
922 running_tests
.remove(test
);
927 fn calc_timeout(running_tests
: &HashMap
<TestDesc
, Instant
>) -> Option
<Duration
> {
928 running_tests
.values().min().map(|next_timeout
| {
929 let now
= Instant
::now();
930 if *next_timeout
>= now
{
937 while pending
> 0 || !remaining
.is_empty() {
938 while pending
< concurrency
&& !remaining
.is_empty() {
939 let test
= remaining
.pop().unwrap();
940 if concurrency
== 1 {
941 // We are doing one test at a time so we can print the name
942 // of the test before we run it. Useful for debugging tests
943 // that hang forever.
944 callback(TeWait(test
.desc
.clone(), test
.testfn
.padding()))?
;
946 let timeout
= Instant
::now() + Duration
::from_secs(TEST_WARN_TIMEOUT_S
);
947 running_tests
.insert(test
.desc
.clone(), timeout
);
948 run_test(opts
, !opts
.run_tests
, test
, tx
.clone());
954 if let Some(timeout
) = calc_timeout(&running_tests
) {
955 res
= rx
.recv_timeout(timeout
);
956 for test
in get_timed_out_tests(&mut running_tests
) {
957 callback(TeTimeout(test
))?
;
959 if res
!= Err(RecvTimeoutError
::Timeout
) {
963 res
= rx
.recv().map_err(|_
| RecvTimeoutError
::Disconnected
);
968 let (desc
, result
, stdout
) = res
.unwrap();
969 running_tests
.remove(&desc
);
971 if concurrency
!= 1 {
972 callback(TeWait(desc
.clone(), PadNone
))?
;
974 callback(TeResult(desc
, result
, stdout
))?
;
978 if opts
.bench_benchmarks
{
979 // All benchmarks run at the end, in serial.
980 // (this includes metric fns)
981 for b
in filtered_benchs_and_metrics
{
982 callback(TeWait(b
.desc
.clone(), b
.testfn
.padding()))?
;
983 run_test(opts
, false, b
, tx
.clone());
984 let (test
, result
, stdout
) = rx
.recv().unwrap();
985 callback(TeResult(test
, result
, stdout
))?
;
992 fn get_concurrency() -> usize {
993 return match env
::var("RUST_TEST_THREADS") {
995 let opt_n
: Option
<usize> = s
.parse().ok();
997 Some(n
) if n
> 0 => n
,
999 panic
!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
1004 Err(..) => num_cpus(),
1009 fn num_cpus() -> usize {
1011 struct SYSTEM_INFO
{
1012 wProcessorArchitecture
: u16,
1015 lpMinimumApplicationAddress
: *mut u8,
1016 lpMaximumApplicationAddress
: *mut u8,
1017 dwActiveProcessorMask
: *mut u8,
1018 dwNumberOfProcessors
: u32,
1019 dwProcessorType
: u32,
1020 dwAllocationGranularity
: u32,
1021 wProcessorLevel
: u16,
1022 wProcessorRevision
: u16,
1025 fn GetSystemInfo(info
: *mut SYSTEM_INFO
) -> i32;
1028 let mut sysinfo
= std
::mem
::zeroed();
1029 GetSystemInfo(&mut sysinfo
);
1030 sysinfo
.dwNumberOfProcessors
as usize
1034 #[cfg(any(target_os = "linux",
1035 target_os
= "macos",
1037 target_os
= "android",
1038 target_os
= "solaris",
1039 target_os
= "emscripten",
1040 target_os
= "fuchsia"))]
1041 fn num_cpus() -> usize {
1042 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1045 #[cfg(any(target_os = "freebsd",
1046 target_os
= "dragonfly",
1047 target_os
= "bitrig",
1048 target_os
= "netbsd"))]
1049 fn num_cpus() -> usize {
1052 let mut cpus
: libc
::c_uint
= 0;
1053 let mut cpus_size
= std
::mem
::size_of_val(&cpus
);
1056 cpus
= libc
::sysconf(libc
::_SC_NPROCESSORS_ONLN
) as libc
::c_uint
;
1059 let mut mib
= [libc
::CTL_HW
, libc
::HW_NCPU
, 0, 0];
1061 libc
::sysctl(mib
.as_mut_ptr(),
1063 &mut cpus
as *mut _
as *mut _
,
1064 &mut cpus_size
as *mut _
as *mut _
,
1075 #[cfg(target_os = "openbsd")]
1076 fn num_cpus() -> usize {
1079 let mut cpus
: libc
::c_uint
= 0;
1080 let mut cpus_size
= std
::mem
::size_of_val(&cpus
);
1081 let mut mib
= [libc
::CTL_HW
, libc
::HW_NCPU
, 0, 0];
1084 libc
::sysctl(mib
.as_mut_ptr(),
1086 &mut cpus
as *mut _
as *mut _
,
1087 &mut cpus_size
as *mut _
as *mut _
,
1097 #[cfg(target_os = "haiku")]
1098 fn num_cpus() -> usize {
1104 pub fn filter_tests(opts
: &TestOpts
, tests
: Vec
<TestDescAndFn
>) -> Vec
<TestDescAndFn
> {
1105 let mut filtered
= tests
;
1107 // Remove tests that don't match the test filter
1108 filtered
= match opts
.filter
{
1110 Some(ref filter
) => {
1111 filtered
.into_iter()
1112 .filter(|test
| test
.desc
.name
.as_slice().contains(&filter
[..]))
1117 // Skip tests that match any of the skip filters
1118 filtered
= filtered
.into_iter()
1119 .filter(|t
| !opts
.skip
.iter().any(|sf
| t
.desc
.name
.as_slice().contains(&sf
[..])))
1122 // Maybe pull out the ignored test and unignore them
1123 filtered
= if !opts
.run_ignored
{
1126 fn filter(test
: TestDescAndFn
) -> Option
<TestDescAndFn
> {
1127 if test
.desc
.ignore
{
1128 let TestDescAndFn {desc, testfn}
= test
;
1129 Some(TestDescAndFn
{
1130 desc
: TestDesc { ignore: false, ..desc }
,
1137 filtered
.into_iter().filter_map(filter
).collect()
1140 // Sort the tests alphabetically
1141 filtered
.sort_by(|t1
, t2
| t1
.desc
.name
.as_slice().cmp(t2
.desc
.name
.as_slice()));
1146 pub fn convert_benchmarks_to_tests(tests
: Vec
<TestDescAndFn
>) -> Vec
<TestDescAndFn
> {
1147 // convert benchmarks to tests, if we're not benchmarking them
1148 tests
.into_iter().map(|x
| {
1149 let testfn
= match x
.testfn
{
1150 DynBenchFn(bench
) => {
1151 DynTestFn(Box
::new(move |()| {
1152 bench
::run_once(|b
| bench
.run(b
))
1155 StaticBenchFn(benchfn
) => {
1156 DynTestFn(Box
::new(move |()| {
1157 bench
::run_once(|b
| benchfn(b
))
1169 pub fn run_test(opts
: &TestOpts
,
1171 test
: TestDescAndFn
,
1172 monitor_ch
: Sender
<MonitorMsg
>) {
1174 let TestDescAndFn {desc, testfn}
= test
;
1176 if force_ignore
|| desc
.ignore
{
1177 monitor_ch
.send((desc
, TrIgnored
, Vec
::new())).unwrap();
1181 fn run_test_inner(desc
: TestDesc
,
1182 monitor_ch
: Sender
<MonitorMsg
>,
1184 testfn
: Box
<FnBox
<()>>) {
1185 struct Sink(Arc
<Mutex
<Vec
<u8>>>);
1186 impl Write
for Sink
{
1187 fn write(&mut self, data
: &[u8]) -> io
::Result
<usize> {
1188 Write
::write(&mut *self.0.lock().unwrap(), data
)
1190 fn flush(&mut self) -> io
::Result
<()> {
1195 // Buffer for capturing standard I/O
1196 let data
= Arc
::new(Mutex
::new(Vec
::new()));
1197 let data2
= data
.clone();
1199 let name
= desc
.name
.clone();
1200 let runtest
= move || {
1201 let oldio
= if !nocapture
{
1203 io
::set_print(Some(Box
::new(Sink(data2
.clone())))),
1204 io
::set_panic(Some(Box
::new(Sink(data2
))))
1210 let result
= catch_unwind(AssertUnwindSafe(|| {
1214 if let Some((printio
, panicio
)) = oldio
{
1215 io
::set_print(printio
);
1216 io
::set_panic(panicio
);
1219 let test_result
= calc_result(&desc
, result
);
1220 let stdout
= data
.lock().unwrap().to_vec();
1221 monitor_ch
.send((desc
.clone(), test_result
, stdout
)).unwrap();
1225 // If the platform is single-threaded we're just going to run
1226 // the test synchronously, regardless of the concurrency
1228 let supports_threads
= !cfg
!(target_os
= "emscripten");
1229 if supports_threads
{
1230 let cfg
= thread
::Builder
::new().name(match name
{
1231 DynTestName(ref name
) => name
.clone(),
1232 StaticTestName(name
) => name
.to_owned(),
1234 cfg
.spawn(runtest
).unwrap();
1241 DynBenchFn(bencher
) => {
1242 let bs
= ::bench
::benchmark(|harness
| bencher
.run(harness
));
1243 monitor_ch
.send((desc
, TrBench(bs
), Vec
::new())).unwrap();
1246 StaticBenchFn(benchfn
) => {
1247 let bs
= ::bench
::benchmark(|harness
| (benchfn
.clone())(harness
));
1248 monitor_ch
.send((desc
, TrBench(bs
), Vec
::new())).unwrap();
1252 let mut mm
= MetricMap
::new();
1253 f
.call_box(&mut mm
);
1254 monitor_ch
.send((desc
, TrMetrics(mm
), Vec
::new())).unwrap();
1257 StaticMetricFn(f
) => {
1258 let mut mm
= MetricMap
::new();
1260 monitor_ch
.send((desc
, TrMetrics(mm
), Vec
::new())).unwrap();
1263 DynTestFn(f
) => run_test_inner(desc
, monitor_ch
, opts
.nocapture
, f
),
1264 StaticTestFn(f
) => run_test_inner(desc
, monitor_ch
, opts
.nocapture
,
1265 Box
::new(move |()| f())),
1269 fn calc_result(desc
: &TestDesc
, task_result
: Result
<(), Box
<Any
+ Send
>>) -> TestResult
{
1270 match (&desc
.should_panic
, task_result
) {
1271 (&ShouldPanic
::No
, Ok(())) |
1272 (&ShouldPanic
::Yes
, Err(_
)) => TrOk
,
1273 (&ShouldPanic
::YesWithMessage(msg
), Err(ref err
))
1274 if err
.downcast_ref
::<String
>()
1276 .or_else(|| err
.downcast_ref
::<&'
static str>().map(|e
| *e
))
1277 .map(|e
| e
.contains(msg
))
1278 .unwrap_or(false) => TrOk
,
1284 pub fn new() -> MetricMap
{
1285 MetricMap(BTreeMap
::new())
1288 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1289 /// must be non-negative. The `noise` indicates the uncertainty of the
1290 /// metric, which doubles as the "noise range" of acceptable
1291 /// pairwise-regressions on this named value, when comparing from one
1292 /// metric to the next using `compare_to_old`.
1294 /// If `noise` is positive, then it means this metric is of a value
1295 /// you want to see grow smaller, so a change larger than `noise` in the
1296 /// positive direction represents a regression.
1298 /// If `noise` is negative, then it means this metric is of a value
1299 /// you want to see grow larger, so a change larger than `noise` in the
1300 /// negative direction represents a regression.
1301 pub fn insert_metric(&mut self, name
: &str, value
: f64, noise
: f64) {
1306 let MetricMap(ref mut map
) = *self;
1307 map
.insert(name
.to_owned(), m
);
1310 pub fn fmt_metrics(&self) -> String
{
1311 let MetricMap(ref mm
) = *self;
1312 let v
: Vec
<String
> = mm
.iter()
1313 .map(|(k
, v
)| format
!("{}: {} (+/- {})", *k
, v
.value
, v
.noise
))
1322 /// A function that is opaque to the optimizer, to allow benchmarks to
1323 /// pretend to use outputs to assist in avoiding dead-code
1326 /// This function is a no-op, and does not even read from `dummy`.
1327 #[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
1328 target_arch
= "asmjs", target_arch
= "wasm32")))]
1329 pub fn black_box
<T
>(dummy
: T
) -> T
{
1330 // we need to "use" the argument in some way LLVM can't
1332 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support (NaCl/le32, asmjs,
// wasm32): rely on `inline(never)` alone to keep the argument "used".
#[cfg(any(all(target_os = "nacl", target_arch = "le32"),
          target_arch = "asmjs", target_arch = "wasm32"))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T {
    dummy
}
1344 /// Callback for benchmark functions to run in their body.
1345 pub fn iter
<T
, F
>(&mut self, mut inner
: F
)
1346 where F
: FnMut() -> T
1348 let start
= Instant
::now();
1349 let k
= self.iterations
;
1353 self.dur
= start
.elapsed();
1356 pub fn ns_elapsed(&mut self) -> u64 {
1357 self.dur
.as_secs() * 1_000_000_000 + (self.dur
.subsec_nanos() as u64)
1360 pub fn ns_per_iter(&mut self) -> u64 {
1361 if self.iterations
== 0 {
1364 self.ns_elapsed() / cmp
::max(self.iterations
, 1)
1368 pub fn bench_n
<F
>(&mut self, n
: u64, f
: F
)
1369 where F
: FnOnce(&mut Bencher
)
1371 self.iterations
= n
;
1375 // This is a more statistics-driven benchmark algorithm
1376 pub fn auto_bench
<F
>(&mut self, mut f
: F
) -> stats
::Summary
1377 where F
: FnMut(&mut Bencher
)
1379 // Initial bench run to get ballpark figure.
1381 self.bench_n(n
, |x
| f(x
));
1383 // Try to estimate iter count for 1ms falling back to 1m
1384 // iterations if first run took < 1ns.
1385 if self.ns_per_iter() == 0 {
1388 n
= 1_000_000 / cmp
::max(self.ns_per_iter(), 1);
1390 // if the first run took more than 1ms we don't want to just
1391 // be left doing 0 iterations on every loop. The unfortunate
1392 // side effect of not being able to do as many runs is
1393 // automatically handled by the statistical analysis below
1394 // (i.e. larger error bars).
1399 let mut total_run
= Duration
::new(0, 0);
1400 let samples
: &mut [f64] = &mut [0.0_f64; 50];
1402 let loop_start
= Instant
::now();
1404 for p
in &mut *samples
{
1405 self.bench_n(n
, |x
| f(x
));
1406 *p
= self.ns_per_iter() as f64;
1409 stats
::winsorize(samples
, 5.0);
1410 let summ
= stats
::Summary
::new(samples
);
1412 for p
in &mut *samples
{
1413 self.bench_n(5 * n
, |x
| f(x
));
1414 *p
= self.ns_per_iter() as f64;
1417 stats
::winsorize(samples
, 5.0);
1418 let summ5
= stats
::Summary
::new(samples
);
1419 let loop_run
= loop_start
.elapsed();
1421 // If we've run for 100ms and seem to have converged to a
1423 if loop_run
> Duration
::from_millis(100) && summ
.median_abs_dev_pct
< 1.0 &&
1424 summ
.median
- summ5
.median
< summ5
.median_abs_dev
{
1428 total_run
= total_run
+ loop_run
;
1429 // Longest we ever run for is 3s.
1430 if total_run
> Duration
::from_secs(3) {
1434 // If we overflow here just return the results so far. We check a
1435 // multiplier of 10 because we're about to multiply by 2 and the
1436 // next iteration of the loop will also multiply by 5 (to calculate
1437 // the summ5 result)
1438 n
= match n
.checked_mul(10) {
1440 None
=> return summ5
,
1448 use std
::time
::Duration
;
1449 use super::{Bencher, BenchSamples}
;
1451 pub fn benchmark
<F
>(f
: F
) -> BenchSamples
1452 where F
: FnMut(&mut Bencher
)
1454 let mut bs
= Bencher
{
1456 dur
: Duration
::new(0, 0),
1460 let ns_iter_summ
= bs
.auto_bench(f
);
1462 let ns_iter
= cmp
::max(ns_iter_summ
.median
as u64, 1);
1463 let mb_s
= bs
.bytes
* 1000 / ns_iter
;
1466 ns_iter_summ
: ns_iter_summ
,
1467 mb_s
: mb_s
as usize,
1471 pub fn run_once
<F
>(f
: F
)
1472 where F
: FnOnce(&mut Bencher
)
1474 let mut bs
= Bencher
{
1476 dur
: Duration
::new(0, 0),
1485 use test
::{TrFailed
, TrIgnored
, TrOk
, filter_tests
, parse_opts
, TestDesc
, TestDescAndFn
,
1486 TestOpts
, run_test
, MetricMap
, StaticTestName
, DynTestName
, DynTestFn
, ShouldPanic
};
1487 use std
::sync
::mpsc
::channel
;
/// An ignored test must not be executed: its (panicking) body never
/// runs, so the reported result cannot be `TrOk`.
#[test]
pub fn do_not_run_ignored_tests() {
    fn f() {
        panic!();
    }
    let desc = TestDescAndFn {
        desc: TestDesc {
            name: StaticTestName("whatever"),
            ignore: true,
            should_panic: ShouldPanic::No,
        },
        testfn: DynTestFn(Box::new(move |()| f())),
    };
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res != TrOk);
}
/// A test flagged `ignore: true` must report `TrIgnored`.
#[test]
pub fn ignored_tests_result_in_ignored() {
    fn f() {}
    let desc = TestDescAndFn {
        desc: TestDesc {
            name: StaticTestName("whatever"),
            ignore: true,
            should_panic: ShouldPanic::No,
        },
        testfn: DynTestFn(Box::new(move |()| f())),
    };
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrIgnored);
}
/// `ShouldPanic::Yes` + a body that panics => the test passes (`TrOk`).
#[test]
fn test_should_panic() {
    fn f() {
        panic!();
    }
    let desc = TestDescAndFn {
        desc: TestDesc {
            name: StaticTestName("whatever"),
            ignore: false,
            should_panic: ShouldPanic::Yes,
        },
        testfn: DynTestFn(Box::new(move |()| f())),
    };
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrOk);
}
/// `ShouldPanic::YesWithMessage` passes when the panic payload contains
/// the expected substring.
#[test]
fn test_should_panic_good_message() {
    fn f() {
        panic!("an error message");
    }
    let desc = TestDescAndFn {
        desc: TestDesc {
            name: StaticTestName("whatever"),
            ignore: false,
            should_panic: ShouldPanic::YesWithMessage("error message"),
        },
        testfn: DynTestFn(Box::new(move |()| f())),
    };
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrOk);
}
/// `ShouldPanic::YesWithMessage` fails (`TrFailed`) when the panic
/// payload does not contain the expected substring.
#[test]
fn test_should_panic_bad_message() {
    fn f() {
        panic!("an error message");
    }
    let desc = TestDescAndFn {
        desc: TestDesc {
            name: StaticTestName("whatever"),
            ignore: false,
            should_panic: ShouldPanic::YesWithMessage("foobar"),
        },
        testfn: DynTestFn(Box::new(move |()| f())),
    };
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrFailed);
}
/// `ShouldPanic::Yes` + a body that does NOT panic => `TrFailed`.
#[test]
fn test_should_panic_but_succeeds() {
    fn f() {}
    let desc = TestDescAndFn {
        desc: TestDesc {
            name: StaticTestName("whatever"),
            ignore: false,
            should_panic: ShouldPanic::Yes,
        },
        testfn: DynTestFn(Box::new(move |()| f())),
    };
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrFailed);
}
/// `--ignored` on the command line must set `TestOpts::run_ignored`.
#[test]
fn parse_ignored_flag() {
    let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
    let opts = match parse_opts(&args) {
        Some(Ok(o)) => o,
        _ => panic!("Malformed arg in parse_ignored_flag"),
    };
    assert!((opts.run_ignored));
}
#[test]
pub fn filter_for_ignored_option() {
    // When we run ignored tests the test filter should filter out all the
    // unignored tests and flip the ignore flag on the rest to false

    let mut opts = TestOpts::new();
    opts.run_tests = true;
    opts.run_ignored = true;

    // One ignored test ("1") and one normal test ("2"); only the
    // ignored one should survive filtering, with its flag cleared.
    let tests = vec![TestDescAndFn {
                         desc: TestDesc {
                             name: StaticTestName("1"),
                             ignore: true,
                             should_panic: ShouldPanic::No,
                         },
                         testfn: DynTestFn(Box::new(move |()| {})),
                     },
                     TestDescAndFn {
                         desc: TestDesc {
                             name: StaticTestName("2"),
                             ignore: false,
                             should_panic: ShouldPanic::No,
                         },
                         testfn: DynTestFn(Box::new(move |()| {})),
                     }];
    let filtered = filter_tests(&opts, tests);

    assert_eq!(filtered.len(), 1);
    assert_eq!(filtered[0].desc.name.to_string(), "1");
    assert!(!filtered[0].desc.ignore);
}
/// `filter_tests` must return tests sorted lexicographically by name.
#[test]
pub fn sort_tests() {
    let mut opts = TestOpts::new();
    opts.run_tests = true;

    // Deliberately unsorted input names.
    let names = vec!["sha1::test".to_string(),
                     "isize::test_to_str".to_string(),
                     "isize::test_pow".to_string(),
                     "test::do_not_run_ignored_tests".to_string(),
                     "test::ignored_tests_result_in_ignored".to_string(),
                     "test::first_free_arg_should_be_a_filter".to_string(),
                     "test::parse_ignored_flag".to_string(),
                     "test::filter_for_ignored_option".to_string(),
                     "test::sort_tests".to_string()];
    let tests = {
        fn testfn() {}
        let mut tests = Vec::new();
        for name in &names {
            let test = TestDescAndFn {
                desc: TestDesc {
                    name: DynTestName((*name).clone()),
                    ignore: false,
                    should_panic: ShouldPanic::No,
                },
                testfn: DynTestFn(Box::new(move |()| testfn())),
            };
            tests.push(test);
        }
        tests
    };
    let filtered = filter_tests(&opts, tests);

    // The same names, in sorted order.
    let expected = vec!["isize::test_pow".to_string(),
                        "isize::test_to_str".to_string(),
                        "sha1::test".to_string(),
                        "test::do_not_run_ignored_tests".to_string(),
                        "test::filter_for_ignored_option".to_string(),
                        "test::first_free_arg_should_be_a_filter".to_string(),
                        "test::ignored_tests_result_in_ignored".to_string(),
                        "test::parse_ignored_flag".to_string(),
                        "test::sort_tests".to_string()];

    for (a, b) in expected.iter().zip(filtered) {
        assert!(*a == b.desc.name.to_string());
    }
}
1689 pub fn test_metricmap_compare() {
1690 let mut m1
= MetricMap
::new();
1691 let mut m2
= MetricMap
::new();
1692 m1
.insert_metric("in-both-noise", 1000.0, 200.0);
1693 m2
.insert_metric("in-both-noise", 1100.0, 200.0);
1695 m1
.insert_metric("in-first-noise", 1000.0, 2.0);
1696 m2
.insert_metric("in-second-noise", 1000.0, 2.0);
1698 m1
.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1699 m2
.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1701 m1
.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1702 m2
.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1704 m1
.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1705 m2
.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1707 m1
.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1708 m2
.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);