1 # Copyright 2005 Dave Abrahams
2 # Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
3 # Copyright 2014-2015 Rene Rivera
4 # Copyright 2014 Microsoft Corporation
5 # Distributed under the Boost Software License, Version 1.0.
6 # (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
8 # This module implements regression testing framework. It declares a number of
9 # main target rules which perform some action and, if the results are OK,
10 # creates an output file.
12 # The exact list of rules is:
13 # 'compile'      -- creates .test file if compilation of sources was successful.
15 # 'compile-fail' -- creates .test file if compilation of sources failed.
16 # 'run'          -- creates .test file if running of executable produced from
17 # sources was successful. Also leaves behind .output file
18 # with the output from program run.
19 # 'run-fail' -- same as above, but .test file is created if running fails.
21 # In all cases, presence of .test file is an indication that the test passed.
22 # For more convenient reporting, you might want to use C++ Boost regression
23 # testing utilities (see http://www.boost.org/more/regression.html).
25 # For historical reasons, a 'unit-test' rule is available which has the same
26 # syntax as 'exe' and behaves just like 'run'.
29 # - Teach compiler_status to handle Jamfile.v2.
31 # - <no-warn> is not implemented, since it is Como-specific, and it is not
32 # clear how to implement it
33 # - std::locale-support is not implemented (it is used in one test).
52 import virtual-target ;
60 # Feature controlling the command used to launch test programs.
61 feature.feature testing.launcher : : free optional ;
# Free-form informational text attached to a test; extracted and reported by
# dump-test when --dump-tests / --out-xml is requested.
63 feature.feature test-info : : free incidental ;
# Command-line arguments passed to the tested executable by 'run'/'run-fail'.
64 feature.feature testing.arg : : free incidental ;
# Input files handed to the tested executable; declared 'dependency' so they
# enter the build graph and are bound before the test runs.
65 feature.feature testing.input-file : : free dependency ;
# When 'off', intermediate test targets (see .types-to-remove) are deleted
# after the test has run.
67 feature.feature preserve-test-targets : on off : incidental propagated ;
69 # Feature to control whether executable binaries are run as part of test.
70 # This can be used to just compile test cases in cross compilation situations.
71 feature.feature testing.execute : on off : incidental propagated ;
72 feature.set-default testing.execute : on ;
74 # Register target types.
# TEST is the common base type for all test result targets; the concrete test
# types below derive from it. The strings 'test' / 'run' / 'passed' are the
# file suffixes used for the generated result files.
75 type.register TEST : test ;
76 type.register COMPILE : : TEST ;
77 type.register COMPILE_FAIL : : TEST ;
# Captured output of a test run ('.run' file); consumed by the RUN / RUN_FAIL
# generators registered below.
78 type.register RUN_OUTPUT : run ;
79 type.register RUN : : TEST ;
80 type.register RUN_FAIL : : TEST ;
81 type.register LINK_FAIL : : TEST ;
82 type.register LINK : : TEST ;
83 type.register UNIT_TEST : passed : TEST ;
86 # Suffix to denote test target directory
88 .TEST-DIR-SUFFIX = ".test" ;
# NOTE(review): this second assignment unconditionally overwrites the one
# above; judging by the "$test" spelling it is intended for VMS only (where a
# leading period in a directory name is problematic) and looks like it should
# sit under an os.name = VMS conditional -- confirm against the full source.
91 .TEST-DIR-SUFFIX = "$test" ;
94 # Declare the rules which create main targets. While the 'type' module already
95 # creates rules with the same names for us, we need extra convenience: default
96 # name of main target, so write our own versions.
98 # Helper rule. Create a test target, using basename of first source if no target
99 # name is explicitly passed. Remembers the created target in a global variable.
# Helper rule behind the 'compile', 'run', etc. front-end rules: creates one
# typed test target and returns it. If no explicit target name is passed, the
# basename of the first source (directory and suffix stripped) is used.
101 rule make-test ( target-type : sources + : requirements * : target-name ? )
103     target-name ?= $(sources[1]:D=:S=) ;
105     # Having periods (".") in the target name is problematic because the typed
106     # generator will strip the suffix and use the bare name for the file
107     # targets. Even though the location-prefix averts problems most times it
108     # does not prevent ambiguity issues when referring to the test targets. For
109     # example when using the XML log output. So we rename the target to remove
110     # the periods, and provide an alias for users.
111     local real-name = [ regex.replace $(target-name) "[.]" "~" ] ;
113     local project = [ project.current ] ;
114     # The <location-prefix> forces the build system to generate paths in the
115     # form '$build_dir/array1$(.TEST-DIR-SUFFIX)/gcc/debug'. This is necessary
116     # to allow post-processing tools to work.
117     local t = [ targets.create-typed-target [ type.type-from-rule-name
118         $(target-type) ] : $(project) : $(real-name) : $(sources) :
119         $(requirements) <location-prefix>$(real-name)$(.TEST-DIR-SUFFIX) ] ;
121     # The alias to the real target, per period replacement above.
122     if $(real-name) != $(target-name)
124         alias $(target-name) : $(t) ;
127     # Remember the test (for --dump-tests). A good way would be to collect all
128     # given a project. This has some technical problems: e.g. we can not call
129     # this dump from a Jamfile since projects referred by 'build-project' are
130     # not available until the whole Jamfile has been loaded.
136 # Note: passing more than one cpp file here is known to fail. Passing a cpp file
137 # and a library target works.
139 rule compile ( sources + : requirements * : target-name ? )
141 return [ make-test compile : $(sources) : $(requirements) : $(target-name) ]
146 rule compile-fail ( sources + : requirements * : target-name ? )
148 return [ make-test compile-fail : $(sources) : $(requirements) :
153 rule link ( sources + : requirements * : target-name ? )
155 return [ make-test link : $(sources) : $(requirements) : $(target-name) ] ;
159 rule link-fail ( sources + : requirements * : target-name ? )
161 return [ make-test link-fail : $(sources) : $(requirements) : $(target-name)
# Validates and tags input files for 'run'/'run-fail': returns the list
# wrapped in the <testing.input-file> feature. The list must already be
# alphabetically sorted, because property-set creation sorts property values
# and an unsorted list would otherwise have its order silently changed.
166 rule handle-input-files ( input-files * )
170     # Check that sorting made when creating property-set instance will not
171     # change the ordering.
172     if [ sequence.insertion-sort $(input-files) ] != $(input-files)
174         errors.user-error "Names of input files must be sorted alphabetically"
175             : "due to internal limitations" ;
178     return <testing.input-file>$(input-files) ;
# Main target rule: builds 'sources' into an executable, runs it with 'args'
# and 'input-files', and produces a .test file on success (the program output
# is captured to a .output file).
182 rule run ( sources + : args * : input-files * : requirements * : target-name ? :
185     requirements += <testing.arg>$(args:J=" ") ;
186     requirements += [ handle-input-files $(input-files) ] ;
187     return [ make-test run : $(sources) : $(requirements) : $(target-name) ] ;
# Same as 'run', but the .test file is created only if running the executable
# fails.
191 rule run-fail ( sources + : args * : input-files * : requirements * :
192     target-name ? : default-build * )
194     requirements += <testing.arg>$(args:J=" ") ;
195     requirements += [ handle-input-files $(input-files) ] ;
196     return [ make-test run-fail : $(sources) : $(requirements) : $(target-name)
201 # Use 'test-suite' as a synonym for 'alias', for backward compatibility.
202 IMPORT : alias : : test-suite ;
205 # For all main targets in 'project-module', which are typed targets with type
206 # derived from 'TEST', produce some interesting information.
210 for local t in $(.all-tests)
217 # Given a project location in normalized form (slashes are forward), compute the
218 # name of the Boost library.
# Given a project location in normalized form (slashes are forward), compute
# the name of the Boost library. Returns:
#   - the path segment between /tools/ or /libs/ and a trailing /test or
#     /example component, when present;
#   - otherwise the segment after /tools/ or /libs/;
#   - "" for a path ending in /status.
220 local rule get-library-name ( path )
222     # Path is in normalized form, so all slashes are forward.
223     local match1 = [ MATCH /(tools|libs)/(.*)/(test|example) : $(path) ] ;
224     local match2 = [ MATCH /(tools|libs)/(.*)$ : $(path) ] ;
225     local match3 = [ MATCH (/status$) : $(path) ] ;
227     if $(match1) { return $(match1[2]) ; }
228     else if $(match2) { return $(match2[2]) ; }
229     else if $(match3) { return "" ; }
230     else if --dump-tests in [ modules.peek : ARGV ]
232         # The 'run' rule and others might be used outside boost. In that case,
233         # just return the path, since the 'library name' makes no sense.
239 # Was an XML dump requested?
240 .out-xml = [ MATCH --out-xml=(.*) : [ modules.peek : ARGV ] ] ;
243 # Takes a target (instance of 'basic-target') and prints
246 # - comments specified via the <test-info> property
247 # - relative location of all source from the project root.
249 rule dump-test ( target )
251 local type = [ $(target).type ] ;
252 local name = [ $(target).name ] ;
253 local project = [ $(target).project ] ;
255 local project-root = [ $(project).get project-root ] ;
256 local library = [ get-library-name [ path.root [ $(project).get location ]
260 name = $(library)/$(name) ;
263 local sources = [ $(target).sources ] ;
265 for local s in $(sources)
267 if [ class.is-a $(s) : file-reference ]
269 local location = [ path.root [ path.root [ $(s).name ]
270 [ $(s).location ] ] [ path.pwd ] ] ;
272 source-files += [ path.relative-to [ path.root $(project-root)
273 [ path.pwd ] ] $(location) ] ;
278 [ $(project).get location ] // [ $(target).name ] $(.TEST-DIR-SUFFIX) ;
279 target-name = $(target-name:J=) ;
281 local r = [ $(target).requirements ] ;
282 # Extract values of the <test-info> feature.
283 local test-info = [ $(r).get <test-info> ] ;
285 # If the user requested XML output on the command-line, add the test info to
286 # that XML file rather than dumping them to stdout.
291 .contents on $(.out-xml) +=
292 "$(nl) <test type=\"$(type)\" name=\"$(name)\">"
293 "$(nl) <target><![CDATA[$(target-name)]]></target>"
294 "$(nl) <info><![CDATA[$(test-info)]]></info>"
295 "$(nl) <source><![CDATA[$(source-files)]]></source>"
301 # Format them into a single string of quoted strings.
302 test-info = \"$(test-info:J=\"\ \")\" ;
304 ECHO boost-test($(type)) \"$(name)\" [$(test-info)] ":"
305 \"$(source-files)\" ;
310 # Register generators. Depending on target type, either 'expect-success' or
311 # 'expect-failure' rule will be used.
312 generators.register-standard testing.expect-success : OBJ : COMPILE ;
313 generators.register-standard testing.expect-failure : OBJ : COMPILE_FAIL ;
314 generators.register-standard testing.expect-success : RUN_OUTPUT : RUN ;
315 generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL ;
316 generators.register-standard testing.expect-failure : EXE : LINK_FAIL ;
317 generators.register-standard testing.expect-success : EXE : LINK ;
319 # Generator which runs an EXE and captures output.
320 generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;
322 # Generator which creates a target if sources run successfully. Differs from RUN
323 # in that run output is not captured. The reason why it exists is that the 'run'
324 # rule is much better for automated testing, but is not user-friendly (see
325 # http://article.gmane.org/gmane.comp.lib.boost.build/6353).
326 generators.register-standard testing.unit-test : EXE : UNIT_TEST ;
329 # The action rules called by generators.
331 # Causes the 'target' to exist after bjam invocation if and only if all the
332 # dependencies were successfully built.
334 rule expect-success ( target : dependency + : requirements * )
336 **passed** $(target) : $(dependency) ;
340 # Causes the 'target' to exist after bjam invocation if and only if some of
341 # the dependencies were not successfully built.
# Creates 'target' only when building 'dependency' FAILED. A marker target is
# set up with the same name as 'dependency' but a distinct '*fail'-suffixed
# grist (so the two cannot collide) and is produced by the
# (failed-as-expected) action. FAIL_EXPECTED inverts how the engine treats the
# dependency's build status, so the marker -- and hence 'target' -- is updated
# only on the expected failure.
343 rule expect-failure ( target : dependency + : properties * )
345     local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
346     local marker = $(dependency:G=$(grist)*fail) ;
347     (failed-as-expected) $(marker) ;
348     FAIL_EXPECTED $(dependency) ;
# Place the marker in the same output location as the dependency.
349     LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
351     DEPENDS $(marker) : $(dependency) ;
352     DEPENDS $(target) : $(marker) ;
353     **passed** $(target) : $(marker) ;
357 # The rule/action combination used to report successful passing of a test.
361 remove-test-targets $(<) ;
363 # Dump all the tests, if needed. We do it here, since dump should happen
364 # only after all Jamfiles have been read, and there is no such place
365 # currently defined (but there should be).
366 if ! $(.dumped-tests) && ( --dump-tests in [ modules.peek : ARGV ] )
368 .dumped-tests = true ;
372 # Force deletion of the target, in case any dependencies failed to build.
378 # Used to create test files signifying passed tests.
385 # Used to create replacement object files that do not get created during tests
386 # that are expected to fail.
388 actions (failed-as-expected)
390 echo failed as expected > "$(<)"
398 PIPE WRITE SYS$OUTPUT "passed" > $(<:W)
401 actions (failed-as-expected)
403 PIPE WRITE SYS$OUTPUT "failed as expected" > $(<:W)
# Computes the PATH_SETUP variable on 'target': a shell command that extends
# the shared-library search path so the test executable can find all dynamic
# libraries it needs when it is run.
407 rule run-path-setup ( target : source : properties * )
409     # For testing, we need to make sure that all dynamic libraries needed by the
410     # test are found. So, we collect all paths from dependency libraries (via
411     # xdll-path property) and add whatever explicit dll-path user has specified.
412     # The resulting paths are added to the environment on each test invocation.
413     local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ;
414     dll-paths += [ feature.get-values <xdll-path> : $(properties) ] ;
415     dll-paths += [ on $(source) return $(RUN_PATH) ] ;
416     dll-paths = [ sequence.unique $(dll-paths) ] ;
# NOTE(review): the two translate-to-os assignments below appear to belong to
# an os.name conditional (path.to-VMS on VMS, path.native otherwise) --
# confirm against the full source.
419     translate-to-os = path.native ;
422     translate-to-os = path.to-VMS ;
424     dll-paths = [ sequence.transform $(translate-to-os) : $(dll-paths) ] ;
425     PATH_SETUP on $(target) = [ common.prepend-path-variable-command
426         [ os.shared-library-path-variable ] : $(dll-paths) ] ;
431 local argv = [ modules.peek : ARGV ] ;
433 toolset.flags testing.capture-output ARGS <testing.arg> ;
434 toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
435 toolset.flags testing.capture-output LAUNCHER <testing.launcher> ;
437 .preserve-test-targets = on ;
438 if --remove-test-targets in [ modules.peek : ARGV ]
440 .preserve-test-targets = off ;
444 # Runs executable 'sources' and stores stdout in file 'target'. Unless
445 # --preserve-test-targets command line option has been specified, removes the
446 # executable. The 'target-to-remove' parameter controls what should be removed:
447 # - if 'none', does not remove anything, ever
448 # - if empty, removes 'source'
449 # - if non-empty and not 'none', contains a list of sources to remove.
451 rule capture-output ( target : source : properties * : targets-to-remove * )
453 output-file on $(target) = $(target:S=.output) ;
454 LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;
456 # The INCLUDES kill a warning about independent target...
457 INCLUDES $(target) : $(target:S=.output) ;
458 # but it also puts .output into dependency graph, so we must tell jam it is
459 # OK if it cannot find the target or updating rule.
460 NOCARE $(target:S=.output) ;
462 # This has two-fold effect. First it adds input files to the dependency
463 # graph, preventing a warning. Second, it causes input files to be bound
464 # before target is created. Therefore, they are bound using SEARCH setting
465 # on them and not LOCATE setting of $(target), as in other case (due to jam
467 DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;
469 if $(targets-to-remove) = none
471 targets-to-remove = ;
473 else if ! $(targets-to-remove)
475 targets-to-remove = $(source) ;
478 run-path-setup $(target) : $(source) : $(properties) ;
480 DISABLE_TEST_EXECUTION on $(target) = 0 ;
481 if [ feature.get-values testing.execute : $(properties) ] = off
483 DISABLE_TEST_EXECUTION on $(target) = 1 ;
486 if [ feature.get-values preserve-test-targets : $(properties) ] = off
487 || $(.preserve-test-targets) = off
489 rmtemp-sources $(target) : $(targets-to-remove) ;
490 for local to-remove in $(targets-to-remove)
492 rmtemp-all-sources $(to-remove) ;
496 if ! [ feature.get-values testing.launcher : $(properties) ]
498 ## On VMS set default launcher to MCR
499 if [ os.name ] = VMS { LAUNCHER on $(target) = MCR ; }
503 .types-to-remove = EXE OBJ ;
# Removes the removable sources (see .types-to-remove) of the given test
# result targets, unless test targets are being preserved.
505 local rule remove-test-targets ( targets + )
507     if $(.preserve-test-targets) = off
# Fixed: the parameter is named 'targets'; the previous $(target) reference
# was never set, so rmtemp-all-sources was invoked with no argument.
509         rmtemp-all-sources $(targets) ;
# Recursively marks removable build products for deletion: inspects the
# action that built 'target', collects those of its sources whose type is in
# .types-to-remove (EXE OBJ), schedules their removal via rmtemp-sources, and
# then recurses into each collected source.
513 local rule rmtemp-all-sources ( target )
516     local action = [ on $(target) return $(.action) ] ;
519         local action-sources = [ $(action).sources ] ;
520         for local source in $(action-sources)
522             local source-type = [ $(source).type ] ;
523             if $(source-type) in $(.types-to-remove)
525                 sources += [ $(source).actual-name ] ;
529                 # ECHO IGNORED: $(source) :: $(source-type) ;
534     rmtemp-sources $(target) : $(sources) ;
535     for local source in $(sources)
537         rmtemp-all-sources $(source) ;
543 local rule rmtemp-sources ( target : sources * )
547 TEMPORARY $(sources) ;
548 # Set a second action on target that will be executed after capture
549 # output action. The 'RmTemps' rule has the 'ignore' modifier so it is
550 # always considered succeeded. This is needed for 'run-fail' test. For
551 # that test the target will be marked with FAIL_EXPECTED, and without
552 # 'ignore' successful execution will be negated and be reported as
553 # failure. With 'ignore' we do not detect a case where removing files
554 # fails, but it is not likely to happen.
555 RmTemps $(target) : $(sources) ;
563 .SET_STATUS = "set status=%ERRORLEVEL%" ;
564 .RUN_OUTPUT_NL = "echo." ;
566 .EXIT_SUCCESS = "0" ;
567 .STATUS_0 = "%status% EQU 0 $(.THEN)" ;
568 .STATUS_NOT_0 = "%status% NEQ 0 $(.THEN)" ;
569 .VERBOSE = "%verbose% EQU 1 $(.THEN)" ;
571 .SHELL_SET = "set " ;
576 else if [ os.name ] = VMS
581 .STATUS = "''status'" ;
582 .SET_STATUS = "status=$STATUS" ;
583 .SAY = "pipe write sys$output" ; ## not really echo
584 .RUN_OUTPUT_NL = "$(.SAY) \"\"" ;
585 .THEN = "$(nl)then" ;
586 .EXIT_SUCCESS = "1" ;
587 .SUCCESS = "status .eq. $(.EXIT_SUCCESS) $(.THEN)" ;
588 .STATUS_0 = "status .eq. 0 $(.THEN)" ;
589 .STATUS_NOT_0 = "status .ne. 0 $(.THEN)" ;
590 .VERBOSE = "verbose .eq. 1 $(.THEN)" ;
599 .STATUS = "$status" ;
600 .SET_STATUS = "status=$?" ;
601 .RUN_OUTPUT_NL = "echo" ;
603 .EXIT_SUCCESS = "0" ;
604 .STATUS_0 = "test $status -eq 0 $(.THEN)" ;
605 .STATUS_NOT_0 = "test $status -ne 0 $(.THEN)" ;
606 .VERBOSE = "test $verbose -eq 1 $(.THEN)" ;
611 .NULLIN = "<" "/dev/null" ;
616 if --verbose-test in [ modules.peek : ARGV ]
622 .RM = [ common.rm-command ] ;
625 actions capture-output bind INPUT_FILES output-file
628 $(.SHELL_SET)status=$(DISABLE_TEST_EXECUTION)
630 echo Skipping test execution due to testing.execute=off
631 exit $(.EXIT_SUCCESS)
633 $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1 $(.NULLIN)
635 $(.RUN_OUTPUT_NL) >> "$(output-file)"
636 echo EXIT STATUS: $(.STATUS) >> "$(output-file)"
638 $(.CP) "$(output-file)" "$(<)"
640 $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
642 $(.SHELL_SET)verbose=1
645 echo ====== BEGIN OUTPUT ======
646 $(.CATENATE) "$(output-file)"
647 echo ====== END OUTPUT ======
653 actions quietly updated ignore piecemeal together RmTemps
660 actions capture-output bind INPUT_FILES output-file
663 $(.SHELL_SET)status=$(DISABLE_TEST_EXECUTION)
665 $(.SAY) "Skipping test execution due to testing.execute=off"
666 exit "$(.EXIT_SUCCESS)"
668 !! Execute twice - first for status, second for output
670 pipe $(LAUNCHER) $(>:W) $(ARGS) $(INPUT_FILES:W) 2>NL: >NL:
672 pipe $(LAUNCHER) $(>:W) $(ARGS) $(INPUT_FILES:W) | type sys$input /out=$(output-file:W)
674 !! Harmonize VMS success status with POSIX
676 $(.SHELL_SET)status="0"
678 $(.RUN_OUTPUT_NL) | append /new sys$input $(output-file:W)
679 $(.SAY) "EXIT STATUS: $(.STATUS)" | append /new sys$input $(output-file:W)
681 $(.CP) $(output-file:W) $(<:W)
683 $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
685 $(.SHELL_SET)verbose=1
688 $(.SAY) "====== BEGIN OUTPUT ======"
689 $(.CATENATE) $(output-file:W)
690 $(.SAY) "====== END OUTPUT ======"
692 !! Harmonize VMS success status with POSIX on exit
694 $(.SHELL_SET)status="$(.EXIT_SUCCESS)"
699 actions quietly updated ignore piecemeal together RmTemps
705 .MAKE_FILE = [ common.file-creation-command ] ;
707 toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;
708 toolset.flags testing.unit-test ARGS <testing.arg> ;
711 rule unit-test ( target : source : properties * )
713 run-path-setup $(target) : $(source) : $(properties) ;
715 if ! [ feature.get-values testing.launcher : $(properties) ]
717 ## On VMS set default launcher to MCR
718 if [ os.name ] = VMS { LAUNCHER on $(target) = MCR ; }
726 $(LAUNCHER) "$(>)" $(ARGS) && $(.MAKE_FILE) "$(<)"
734 pipe $(LAUNCHER) $(>:W) $(ARGS) && $(.MAKE_FILE) $(<:W)
738 IMPORT $(__name__) : compile compile-fail run run-fail link link-fail
739 : : compile compile-fail run run-fail link link-fail ;
742 # This is a composing generator to support cases where a generator for the
743 # specified target constructs other targets as well. One such example is msvc's
744 # exe generator that constructs both EXE and PDB targets.
745 type.register TIME : time ;
746 generators.register-composing testing.time : : TIME ;
749 # Note that this rule may be called multiple times for a single target in case
750 # there are multiple actions operating on the same target in sequence. One such
751 # example are msvc exe targets first created by a linker action and then updated
752 # with an embedded manifest file by a separate action.
# Accumulates timing figures reported by the build engine for one action on
# 'target'. May be invoked several times for the same target (see the comment
# above), hence the '+=' accumulation. The 'start' and 'end' parameters are
# accepted but not referenced in this body.
753 rule record-time ( target : source : start end user system clock )
# Builds a "[source1,source2] " prefix identifying which action the numbers
# belong to (grist stripped, names joined with commas).
755     local src-string = [$(source:G=:J=",")"] " ;
756     USER_TIME on $(target) += $(src-string)$(user) ;
757     SYSTEM_TIME on $(target) += $(src-string)$(system) ;
758     CLOCK_TIME on $(target) += $(src-string)$(clock) ;
760     # We need the following variables because attempting to perform such
761     # variable expansion in actions would not work due to quotes getting treated
762     # as regular characters.
763     USER_TIME_SECONDS on $(target) += $(src-string)$(user)" seconds" ;
764     SYSTEM_TIME_SECONDS on $(target) += $(src-string)$(system)" seconds" ;
765     CLOCK_TIME_SECONDS on $(target) += $(src-string)$(clock)" seconds" ;
769 # Support for generating timing information for any main target. To use
770 # declare a custom make target that uses the testing.time generator rule
771 # specified here. For example:
773 # make main.cpp : main_cpp.pro : @do-something ;
774 # time main.time : main.cpp ;
775 # actions do-something
777 # sleep 2 && echo "$(<)" > "$(<)"
780 # The above will generate a "main.time", and echo to output, timing
781 # information for the action of source "main.cpp".
784 IMPORT testing : record-time : : testing.record-time ;
787 # Calling this rule requests that Boost Build time how long it takes to build
788 # the 'source' target and display the results both on the standard output and in
791 rule time ( target : sources + : properties * )
793 # Set up rule for recording timing information.
794 local action = [ on $(target) return $(.action) ] ;
795 for local action.source in [ $(action).sources ]
797 # Yes, this uses the private "actual-name" of the target action.
798 # But it's the only way to get at the real name of the sources
799 # given the context of header scanners.
800 __TIMING_RULE__ on [ $(action.source).actual-name ] = testing.record-time $(target) ;
803 # Make sure the sources get rebuilt any time we need to retrieve that
805 REBUILDS $(target) : $(sources) ;
811 echo user: $(USER_TIME)
812 echo system: $(SYSTEM_TIME)
813 echo clock: $(CLOCK_TIME)
815 echo user: $(USER_TIME_SECONDS) > "$(<)"
816 echo system: $(SYSTEM_TIME_SECONDS) >> "$(<)"
817 echo clock: $(CLOCK_TIME_SECONDS) >> "$(<)"
!! Report the recorded times on stdout and write the seconds-suffixed values
!! to the output file. (Fixed: the SYSTEM_TIME and CLOCK_TIME expansions were
!! missing their '$' sigil and printed the variable names literally.)
824     WRITE SYS$OUTPUT "user: ", "$(USER_TIME)"
825     WRITE SYS$OUTPUT "system: ", "$(SYSTEM_TIME)"
826     WRITE SYS$OUTPUT "clock: ", "$(CLOCK_TIME)"
828     PIPE WRITE SYS$OUTPUT "user: ", "$(USER_TIME_SECONDS)" | TYPE SYS$INPUT /OUT=$(<:W)
829     PIPE WRITE SYS$OUTPUT "system: ", "$(SYSTEM_TIME_SECONDS)" | APPEND /NEW SYS$INPUT $(<:W)
830     PIPE WRITE SYS$OUTPUT "clock: ", "$(CLOCK_TIME_SECONDS)" | APPEND /NEW SYS$INPUT $(<:W)