# Copyright 2005 Dave Abrahams
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Copyright 2014-2015 Rene Rivera
# Copyright 2014 Microsoft Corporation
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)

# This module implements a regression testing framework. It declares a number
# of main target rules which perform some action and, if the results are OK,
# create an output file.
#
# The exact list of rules is:
# 'compile'      -- creates a .test file if compilation of the sources was
#                   successful.
# 'compile-fail' -- creates a .test file if compilation of the sources failed.
# 'run'          -- creates a .test file if running the executable produced
#                   from the sources was successful. Also leaves behind a
#                   .output file with the output from the program run.
# 'run-fail'     -- same as above, but the .test file is created if running
#                   fails.
#
# In all cases, the presence of the .test file is an indication that the test
# passed. For more convenient reporting, you might want to use the C++ Boost
# regression testing utilities (see http://www.boost.org/more/regression.html).
#
# For historical reasons, a 'unit-test' rule is available which has the same
# syntax as 'exe' and behaves just like 'run'.

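# As a quick illustration (a sketch only, not part of this module), a project
# Jamfile might use these rules as follows; the target and file names below are
# made up for illustration:
#
#   compile has_feature.cpp ;
#   compile-fail must_not_compile.cpp ;
#   run basic_test.cpp : --arg1 : input.txt : <define>TESTING ;
#   run-fail aborts_on_purpose.cpp ;
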
# Things to do:
#  - Teach compiler_status to handle Jamfile.v2.
# Notes:
#  - <no-warn> is not implemented, since it is Como-specific, and it is not
#    clear how to implement it.
#  - std::locale-support is not implemented (it is used in one test).


import alias ;
import "class" ;
import common ;
import errors ;
import feature ;
import generators ;
import os ;
import path ;
import project ;
import property ;
import property-set ;
import regex ;
import sequence ;
import targets ;
import toolset ;
import type ;
import virtual-target ;


rule init ( )
{
}


# Feature controlling the command used to launch test programs.
feature.feature testing.launcher : : free optional ;

feature.feature test-info : : free incidental ;
feature.feature testing.arg : : free incidental ;
feature.feature testing.input-file : : free dependency ;

feature.feature preserve-test-targets : on off : incidental propagated ;

# Feature to control whether executable binaries are run as part of a test.
# This can be used to only compile test cases in cross-compilation situations.
feature.feature testing.execute : on off : incidental propagated ;
feature.set-default testing.execute : on ;

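# For example (an illustrative sketch, not part of this module), a test can be
# run under a custom launcher, or execution can be disabled when
# cross-compiling; the test name and the valgrind launcher are assumptions used
# only for illustration:
#
#   run my_test.cpp : : : <testing.launcher>valgrind ;
#
#   b2 testing.execute=off
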
# Register target types.
type.register TEST : test ;
type.register COMPILE : : TEST ;
type.register COMPILE_FAIL : : TEST ;
type.register RUN_OUTPUT : run ;
type.register RUN : : TEST ;
type.register RUN_FAIL : : TEST ;
type.register LINK_FAIL : : TEST ;
type.register LINK : : TEST ;
type.register UNIT_TEST : passed : TEST ;


# Suffix to denote test target directory
#
.TEST-DIR-SUFFIX = ".test" ;
if [ os.name ] = VMS
{
    .TEST-DIR-SUFFIX = "$test" ;
}

# Declare the rules which create main targets. While the 'type' module already
# creates rules with the same names for us, we need extra convenience: a default
# name for the main target, so we write our own versions.

# Helper rule. Creates a test target, using the basename of the first source if
# no target name is explicitly passed. Remembers the created target in a global
# variable.
#
rule make-test ( target-type : sources + : requirements * : target-name ? )
{
    target-name ?= $(sources[1]:D=:S=) ;

    # Having periods (".") in the target name is problematic because the typed
    # generator will strip the suffix and use the bare name for the file
    # targets. Even though the location-prefix averts problems most of the time
    # it does not prevent ambiguity issues when referring to the test targets,
    # for example when using the XML log output. So we rename the target to
    # remove the periods, and provide an alias for users.
    local real-name = [ regex.replace $(target-name) "[.]" "~" ] ;

    local project = [ project.current ] ;
    # The <location-prefix> forces the build system to generate paths in the
    # form '$build_dir/array1$(.TEST-DIR-SUFFIX)/gcc/debug'. This is necessary
    # to allow post-processing tools to work.
    local t = [ targets.create-typed-target [ type.type-from-rule-name
        $(target-type) ] : $(project) : $(real-name) : $(sources) :
        $(requirements) <location-prefix>$(real-name)$(.TEST-DIR-SUFFIX) ] ;

    # Add an alias to the real target, per the period replacement above.
    if $(real-name) != $(target-name)
    {
        alias $(target-name) : $(t) ;
    }

    # Remember the test (for --dump-tests). A better approach would be to
    # collect all tests for a given project. That has some technical problems:
    # e.g. we can not call this dump from a Jamfile since projects referred to
    # by 'build-project' are not available until the whole Jamfile has been
    # loaded.
    .all-tests += $(t) ;
    return $(t) ;
}


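# As an illustration of the renaming above (the file name is made up for this
# sketch): declaring 'run foo.bar.cpp ;' defaults the target name to "foo.bar",
# creates the typed test target as "foo~bar" (built under a "foo~bar.test"
# subdirectory) and adds an alias "foo.bar" pointing to it.
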
# Note: passing more than one cpp file here is known to fail. Passing a cpp file
# and a library target works.
#
rule compile ( sources + : requirements * : target-name ? )
{
    return [ make-test compile : $(sources) : $(requirements) : $(target-name)
        ] ;
}


rule compile-fail ( sources + : requirements * : target-name ? )
{
    return [ make-test compile-fail : $(sources) : $(requirements) :
        $(target-name) ] ;
}


rule link ( sources + : requirements * : target-name ? )
{
    return [ make-test link : $(sources) : $(requirements) : $(target-name) ] ;
}


rule link-fail ( sources + : requirements * : target-name ? )
{
    return [ make-test link-fail : $(sources) : $(requirements) : $(target-name)
        ] ;
}


rule handle-input-files ( input-files * )
{
    if $(input-files[2])
    {
        # Check that the sorting performed when creating the property-set
        # instance will not change the ordering.
        if [ sequence.insertion-sort $(input-files) ] != $(input-files)
        {
            errors.user-error "Names of input files must be sorted alphabetically"
                : "due to internal limitations" ;
        }
    }
    return <testing.input-file>$(input-files) ;
}


rule run ( sources + : args * : input-files * : requirements * : target-name ? :
    default-build * )
{
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += [ handle-input-files $(input-files) ] ;
    return [ make-test run : $(sources) : $(requirements) : $(target-name) ] ;
}


rule run-fail ( sources + : args * : input-files * : requirements * :
    target-name ? : default-build * )
{
    requirements += <testing.arg>$(args:J=" ") ;
    requirements += [ handle-input-files $(input-files) ] ;
    return [ make-test run-fail : $(sources) : $(requirements) : $(target-name)
        ] ;
}


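# As an illustrative sketch (the program and file names are made up):
# 'run prog.cpp : --iterations 10 : data.txt ;' adds the requirements
# '<testing.arg>"--iterations 10"' and '<testing.input-file>data.txt' to the
# created test target, which capture-output later passes to the test run.
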
# Use 'test-suite' as a synonym for 'alias', for backward compatibility.
IMPORT : alias : : test-suite ;


# For every recorded test target (a typed target with a type derived from
# 'TEST'), produce some interesting information.
#
rule dump-tests
{
    for local t in $(.all-tests)
    {
        dump-test $(t) ;
    }
}


# Given a project location in normalized form (slashes are forward), compute the
# name of the Boost library.
#
local rule get-library-name ( path )
{
    # Path is in normalized form, so all slashes are forward.
    local match1 = [ MATCH /(tools|libs)/(.*)/(test|example) : $(path) ] ;
    local match2 = [ MATCH /(tools|libs)/(.*)$ : $(path) ] ;
    local match3 = [ MATCH (/status$) : $(path) ] ;

    if $(match1) { return $(match1[2]) ; }
    else if $(match2) { return $(match2[2]) ; }
    else if $(match3) { return "" ; }
    else if --dump-tests in [ modules.peek : ARGV ]
    {
        # The 'run' rule and others might be used outside Boost. In that case,
        # just return the path, since the 'library name' makes no sense.
        return $(path) ;
    }
}


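# For example (illustration only), a location such as
# ".../boost/libs/array/test" matches the first pattern and yields the library
# name "array", while a path outside the Boost tree is returned unchanged when
# --dump-tests was requested.
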
# Was an XML dump requested?
.out-xml = [ MATCH --out-xml=(.*) : [ modules.peek : ARGV ] ] ;


# Takes a target (an instance of 'basic-target') and prints
# - its type
# - its name
# - comments specified via the <test-info> property
# - the relative location of all sources from the project root.
#
rule dump-test ( target )
{
    local type = [ $(target).type ] ;
    local name = [ $(target).name ] ;
    local project = [ $(target).project ] ;

    local project-root = [ $(project).get project-root ] ;
    local library = [ get-library-name [ path.root [ $(project).get location ]
        [ path.pwd ] ] ] ;
    if $(library)
    {
        name = $(library)/$(name) ;
    }

    local sources = [ $(target).sources ] ;
    local source-files ;
    for local s in $(sources)
    {
        if [ class.is-a $(s) : file-reference ]
        {
            local location = [ path.root [ path.root [ $(s).name ]
                [ $(s).location ] ] [ path.pwd ] ] ;

            source-files += [ path.relative-to [ path.root $(project-root)
                [ path.pwd ] ] $(location) ] ;
        }
    }

    local target-name =
        [ $(project).get location ] // [ $(target).name ] $(.TEST-DIR-SUFFIX) ;
    target-name = $(target-name:J=) ;

    local r = [ $(target).requirements ] ;
    # Extract values of the <test-info> feature.
    local test-info = [ $(r).get <test-info> ] ;

    # If the user requested XML output on the command-line, add the test info to
    # that XML file rather than dumping it to stdout.
    if $(.out-xml)
    {
        local nl = "
" ;
        .contents on $(.out-xml) +=
            "$(nl) <test type=\"$(type)\" name=\"$(name)\">"
            "$(nl) <target><![CDATA[$(target-name)]]></target>"
            "$(nl) <info><![CDATA[$(test-info)]]></info>"
            "$(nl) <source><![CDATA[$(source-files)]]></source>"
            "$(nl) </test>"
            ;
    }
    else
    {
        # Format them into a single string of quoted strings.
        test-info = \"$(test-info:J=\"\ \")\" ;

        ECHO boost-test($(type)) \"$(name)\" [$(test-info)] ":"
            \"$(source-files)\" ;
    }
}


# Register generators. Depending on target type, either 'expect-success' or
# 'expect-failure' rule will be used.
generators.register-standard testing.expect-success : OBJ : COMPILE ;
generators.register-standard testing.expect-failure : OBJ : COMPILE_FAIL ;
generators.register-standard testing.expect-success : RUN_OUTPUT : RUN ;
generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL ;
generators.register-standard testing.expect-failure : EXE : LINK_FAIL ;
generators.register-standard testing.expect-success : EXE : LINK ;

# Generator which runs an EXE and captures output.
generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;

# Generator which creates a target if sources run successfully. Differs from RUN
# in that run output is not captured. The reason why it exists is that the 'run'
# rule is much better for automated testing, but is not user-friendly (see
# http://article.gmane.org/gmane.comp.lib.boost.build/6353).
generators.register-standard testing.unit-test : EXE : UNIT_TEST ;


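# As a usage sketch (the target and file names are made up for illustration), a
# 'unit-test' target is declared with the same syntax as 'exe' and the resulting
# binary is executed as part of the build:
#
#   unit-test my_checks : my_checks.cpp ;
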
# The action rules called by generators.

# Causes the 'target' to exist after bjam invocation if and only if all the
# dependencies were successfully built.
#
rule expect-success ( target : dependency + : requirements * )
{
    **passed** $(target) : $(dependency) ;
}


# Causes the 'target' to exist after bjam invocation if and only if some of the
# dependencies were not successfully built.
#
rule expect-failure ( target : dependency + : properties * )
{
    local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
    local marker = $(dependency:G=$(grist)*fail) ;
    (failed-as-expected) $(marker) ;
    FAIL_EXPECTED $(dependency) ;
    LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
    RMOLD $(marker) ;
    DEPENDS $(marker) : $(dependency) ;
    DEPENDS $(target) : $(marker) ;
    **passed** $(target) : $(marker) ;
}


# The rule/action combination used to report successful passing of a test.
#
rule **passed**
{
    remove-test-targets $(<) ;

    # Dump all the tests, if needed. We do it here, since the dump should happen
    # only after all Jamfiles have been read, and there is no such place
    # currently defined (but there should be).
    if ! $(.dumped-tests) && ( --dump-tests in [ modules.peek : ARGV ] )
    {
        .dumped-tests = true ;
        dump-tests ;
    }

    # Force deletion of the target, in case any dependencies failed to build.
    RMOLD $(<) ;
}


# Used to create test files signifying passed tests.
#
actions **passed**
{
    echo passed > "$(<)"
}

# Used to create replacement object files that do not get created during tests
# that are expected to fail.
#
actions (failed-as-expected)
{
    echo failed as expected > "$(<)"
}


if [ os.name ] = VMS
{
    actions **passed**
    {
        PIPE WRITE SYS$OUTPUT "passed" > $(<:W)
    }

    actions (failed-as-expected)
    {
        PIPE WRITE SYS$OUTPUT "failed as expected" > $(<:W)
    }
}

rule run-path-setup ( target : source : properties * )
{
    # For testing, we need to make sure that all dynamic libraries needed by the
    # test are found. So, we collect all paths from dependency libraries (via
    # the xdll-path property) and add whatever explicit dll-path the user has
    # specified. The resulting paths are added to the environment on each test
    # invocation.
    local dll-paths = [ feature.get-values <dll-path> : $(properties) ] ;
    dll-paths += [ feature.get-values <xdll-path> : $(properties) ] ;
    dll-paths += [ on $(source) return $(RUN_PATH) ] ;
    dll-paths = [ sequence.unique $(dll-paths) ] ;
    if $(dll-paths)
    {
        translate-to-os = path.native ;
        if [ os.name ] = VMS
        {
            translate-to-os = path.to-VMS ;
        }
        dll-paths = [ sequence.transform $(translate-to-os) : $(dll-paths) ] ;
        PATH_SETUP on $(target) = [ common.prepend-path-variable-command
            [ os.shared-library-path-variable ] : $(dll-paths) ] ;
    }
}


local argv = [ modules.peek : ARGV ] ;

toolset.flags testing.capture-output ARGS <testing.arg> ;
toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
toolset.flags testing.capture-output LAUNCHER <testing.launcher> ;

.preserve-test-targets = on ;
if --remove-test-targets in [ modules.peek : ARGV ]
{
    .preserve-test-targets = off ;
}


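# For example (an illustrative sketch), built test binaries can be cleaned up
# after each run either globally, with 'b2 --remove-test-targets', or per
# build request via the preserve-test-targets feature, e.g.
# 'b2 preserve-test-targets=off'.
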
# Runs the executable 'source' and stores its stdout in the file 'target'. If
# the --remove-test-targets command line option or the preserve-test-targets=off
# property has been specified, also removes the executable. The
# 'targets-to-remove' parameter controls what should be removed:
#   - if 'none', does not remove anything, ever
#   - if empty, removes 'source'
#   - if non-empty and not 'none', contains a list of sources to remove.
#
rule capture-output ( target : source : properties * : targets-to-remove * )
{
    output-file on $(target) = $(target:S=.output) ;
    LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;

    # The INCLUDES kills a warning about an independent target...
    INCLUDES $(target) : $(target:S=.output) ;
    # ...but it also puts .output into the dependency graph, so we must tell jam
    # it is OK if it cannot find the target or an updating rule.
    NOCARE $(target:S=.output) ;

    # This has a two-fold effect. First, it adds the input files to the
    # dependency graph, preventing a warning. Second, it causes the input files
    # to be bound before the target is created. Therefore, they are bound using
    # the SEARCH setting on them and not the LOCATE setting of $(target), as
    # would happen otherwise (due to a jam bug).
    DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;

    if $(targets-to-remove) = none
    {
        targets-to-remove = ;
    }
    else if ! $(targets-to-remove)
    {
        targets-to-remove = $(source) ;
    }

    run-path-setup $(target) : $(source) : $(properties) ;

    DISABLE_TEST_EXECUTION on $(target) = 0 ;
    if [ feature.get-values testing.execute : $(properties) ] = off
    {
        DISABLE_TEST_EXECUTION on $(target) = 1 ;
    }

    if [ feature.get-values preserve-test-targets : $(properties) ] = off
        || $(.preserve-test-targets) = off
    {
        rmtemp-sources $(target) : $(targets-to-remove) ;
        for local to-remove in $(targets-to-remove)
        {
            rmtemp-all-sources $(to-remove) ;
        }
    }

    if ! [ feature.get-values testing.launcher : $(properties) ]
    {
        ## On VMS set default launcher to MCR
        if [ os.name ] = VMS { LAUNCHER on $(target) = MCR ; }
    }
}

.types-to-remove = EXE OBJ ;

local rule remove-test-targets ( targets + )
{
    if $(.preserve-test-targets) = off
    {
        rmtemp-all-sources $(targets) ;
    }
}

local rule rmtemp-all-sources ( target )
{
    local sources ;
    local action = [ on $(target) return $(.action) ] ;
    if $(action)
    {
        local action-sources = [ $(action).sources ] ;
        for local source in $(action-sources)
        {
            local source-type = [ $(source).type ] ;
            if $(source-type) in $(.types-to-remove)
            {
                sources += [ $(source).actual-name ] ;
            }
            else
            {
                # ECHO IGNORED: $(source) :: $(source-type) ;
            }
        }
        if $(sources)
        {
            rmtemp-sources $(target) : $(sources) ;
            for local source in $(sources)
            {
                rmtemp-all-sources $(source) ;
            }
        }
    }
}

local rule rmtemp-sources ( target : sources * )
{
    if $(sources)
    {
        TEMPORARY $(sources) ;
        # Set a second action on the target that will be executed after the
        # capture-output action. The 'RmTemps' rule has the 'ignore' modifier so
        # it is always considered to have succeeded. This is needed for the
        # 'run-fail' tests: for those the target will be marked with
        # FAIL_EXPECTED, and without 'ignore' a successful execution would be
        # negated and reported as a failure. With 'ignore' we do not detect a
        # case where removing the files fails, but that is not likely to happen.
        RmTemps $(target) : $(sources) ;
    }
}


if [ os.name ] = NT
{
    .STATUS = %status% ;
    .SET_STATUS = "set status=%ERRORLEVEL%" ;
    .RUN_OUTPUT_NL = "echo." ;
    .THEN = "(" ;
    .EXIT_SUCCESS = "0" ;
    .STATUS_0 = "%status% EQU 0 $(.THEN)" ;
    .STATUS_NOT_0 = "%status% NEQ 0 $(.THEN)" ;
    .VERBOSE = "%verbose% EQU 1 $(.THEN)" ;
    .ENDIF = ")" ;
    .SHELL_SET = "set " ;
    .CATENATE = type ;
    .CP = copy ;
    .NULLIN = ;
}
else if [ os.name ] = VMS
{
    local nl = "
" ;

    .STATUS = "''status'" ;
    .SET_STATUS = "status=$STATUS" ;
    .SAY = "pipe write sys$output" ;  ## not really echo
    .RUN_OUTPUT_NL = "$(.SAY) \"\"" ;
    .THEN = "$(nl)then" ;
    .EXIT_SUCCESS = "1" ;
    .SUCCESS = "status .eq. $(.EXIT_SUCCESS) $(.THEN)" ;
    .STATUS_0 = "status .eq. 0 $(.THEN)" ;
    .STATUS_NOT_0 = "status .ne. 0 $(.THEN)" ;
    .VERBOSE = "verbose .eq. 1 $(.THEN)" ;
    .ENDIF = "endif" ;
    .SHELL_SET = "" ;
    .CATENATE = type ;
    .CP = copy ;
    .NULLIN = ;
}
else
{
    .STATUS = "$status" ;
    .SET_STATUS = "status=$?" ;
    .RUN_OUTPUT_NL = "echo" ;
    .THEN = "; then" ;
    .EXIT_SUCCESS = "0" ;
    .STATUS_0 = "test $status -eq 0 $(.THEN)" ;
    .STATUS_NOT_0 = "test $status -ne 0 $(.THEN)" ;
    .VERBOSE = "test $verbose -eq 1 $(.THEN)" ;
    .ENDIF = "fi" ;
    .SHELL_SET = "" ;
    .CATENATE = cat ;
    .CP = cp ;
    .NULLIN = "<" "/dev/null" ;
}


.VERBOSE_TEST = 0 ;
if --verbose-test in [ modules.peek : ARGV ]
{
    .VERBOSE_TEST = 1 ;
}


.RM = [ common.rm-command ] ;


actions capture-output bind INPUT_FILES output-file
{
    $(PATH_SETUP)
    $(.SHELL_SET)status=$(DISABLE_TEST_EXECUTION)
    if $(.STATUS_NOT_0)
        echo Skipping test execution due to testing.execute=off
        exit $(.EXIT_SUCCESS)
    $(.ENDIF)
    $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1 $(.NULLIN)
    $(.SET_STATUS)
    $(.RUN_OUTPUT_NL) >> "$(output-file)"
    echo EXIT STATUS: $(.STATUS) >> "$(output-file)"
    if $(.STATUS_0)
        $(.CP) "$(output-file)" "$(<)"
    $(.ENDIF)
    $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
    if $(.STATUS_NOT_0)
        $(.SHELL_SET)verbose=1
    $(.ENDIF)
    if $(.VERBOSE)
        echo ====== BEGIN OUTPUT ======
        $(.CATENATE) "$(output-file)"
        echo ====== END OUTPUT ======
    $(.ENDIF)
    exit $(.STATUS)
}


actions quietly updated ignore piecemeal together RmTemps
{
    $(.RM) "$(>)"
}

if [ os.name ] = VMS
{
    actions capture-output bind INPUT_FILES output-file
    {
        $(PATH_SETUP)
        $(.SHELL_SET)status=$(DISABLE_TEST_EXECUTION)
        if $(.STATUS_NOT_0)
            $(.SAY) "Skipping test execution due to testing.execute=off"
            exit "$(.EXIT_SUCCESS)"
        $(.ENDIF)
        !! Execute twice - first for status, second for output
        set noon
        pipe $(LAUNCHER) $(>:W) $(ARGS) $(INPUT_FILES:W) 2>NL: >NL:
        $(.SET_STATUS)
        pipe $(LAUNCHER) $(>:W) $(ARGS) $(INPUT_FILES:W) | type sys$input /out=$(output-file:W)
        set on
        !! Harmonize VMS success status with POSIX
        if $(.SUCCESS)
            $(.SHELL_SET)status="0"
        $(.ENDIF)
        $(.RUN_OUTPUT_NL) | append /new sys$input $(output-file:W)
        $(.SAY) "EXIT STATUS: $(.STATUS)" | append /new sys$input $(output-file:W)
        if $(.STATUS_0)
            $(.CP) $(output-file:W) $(<:W)
        $(.ENDIF)
        $(.SHELL_SET)verbose=$(.VERBOSE_TEST)
        if $(.STATUS_NOT_0)
            $(.SHELL_SET)verbose=1
        $(.ENDIF)
        if $(.VERBOSE)
            $(.SAY) "====== BEGIN OUTPUT ======"
            $(.CATENATE) $(output-file:W)
            $(.SAY) "====== END OUTPUT ======"
        $(.ENDIF)
        !! Harmonize VMS success status with POSIX on exit
        if $(.STATUS_0)
            $(.SHELL_SET)status="$(.EXIT_SUCCESS)"
        $(.ENDIF)
        exit "$(.STATUS)"
    }

    actions quietly updated ignore piecemeal together RmTemps
    {
        $(.RM) $(>:WJ=;*,);*
    }
}

.MAKE_FILE = [ common.file-creation-command ] ;

toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;
toolset.flags testing.unit-test ARGS <testing.arg> ;


rule unit-test ( target : source : properties * )
{
    run-path-setup $(target) : $(source) : $(properties) ;

    if ! [ feature.get-values testing.launcher : $(properties) ]
    {
        ## On VMS set default launcher to MCR
        if [ os.name ] = VMS { LAUNCHER on $(target) = MCR ; }
    }
}


actions unit-test
{
    $(PATH_SETUP)
    $(LAUNCHER) "$(>)" $(ARGS) && $(.MAKE_FILE) "$(<)"
}

if [ os.name ] = VMS
{
    actions unit-test
    {
        $(PATH_SETUP)
        pipe $(LAUNCHER) $(>:W) $(ARGS) && $(.MAKE_FILE) $(<:W)
    }
}

IMPORT $(__name__) : compile compile-fail run run-fail link link-fail
    : : compile compile-fail run run-fail link link-fail ;


# This is a composing generator to support cases where a generator for the
# specified target constructs other targets as well. One such example is msvc's
# exe generator that constructs both EXE and PDB targets.
type.register TIME : time ;
generators.register-composing testing.time : : TIME ;


# Note that this rule may be called multiple times for a single target in case
# there are multiple actions operating on the same target in sequence. One such
# example is msvc exe targets, first created by a linker action and then updated
# with an embedded manifest file by a separate action.
rule record-time ( target : source : start end user system clock )
{
    local src-string = [$(source:G=:J=",")"] " ;
    USER_TIME on $(target) += $(src-string)$(user) ;
    SYSTEM_TIME on $(target) += $(src-string)$(system) ;
    CLOCK_TIME on $(target) += $(src-string)$(clock) ;

    # We need the following variables because attempting to perform such
    # variable expansion in actions would not work due to quotes getting treated
    # as regular characters.
    USER_TIME_SECONDS on $(target) += $(src-string)$(user)" seconds" ;
    SYSTEM_TIME_SECONDS on $(target) += $(src-string)$(system)" seconds" ;
    CLOCK_TIME_SECONDS on $(target) += $(src-string)$(clock)" seconds" ;
}


# Support for generating timing information for any main target. To use it,
# declare a custom make target that uses the testing.time generator rule
# specified here. For example:
#
#   make main.cpp : main_cpp.pro : @do-something ;
#   time main.time : main.cpp ;
#   actions do-something
#   {
#       sleep 2 && echo "$(<)" > "$(<)"
#   }
#
# The above will generate a "main.time" target, and echo to the output, timing
# information for the action building the source "main.cpp".


IMPORT testing : record-time : : testing.record-time ;


# Calling this rule requests that Boost.Build time how long it takes to build
# the 'sources' targets and display the results both on the standard output and
# in the 'target' file.
#
rule time ( target : sources + : properties * )
{
    # Set up the rule for recording timing information.
    local action = [ on $(target) return $(.action) ] ;
    for local action.source in [ $(action).sources ]
    {
        # Yes, this uses the private "actual-name" of the target action. But it
        # is the only way to get at the real name of the sources given the
        # context of header scanners.
        __TIMING_RULE__ on [ $(action.source).actual-name ] =
            testing.record-time $(target) ;
    }

    # Make sure the sources get rebuilt any time we need to retrieve that
    # information.
    REBUILDS $(target) : $(sources) ;
}


actions time
{
    echo user: $(USER_TIME)
    echo system: $(SYSTEM_TIME)
    echo clock: $(CLOCK_TIME)

    echo user: $(USER_TIME_SECONDS) > "$(<)"
    echo system: $(SYSTEM_TIME_SECONDS) >> "$(<)"
    echo clock: $(CLOCK_TIME_SECONDS) >> "$(<)"
}

if [ os.name ] = VMS
{
    actions time
    {
        WRITE SYS$OUTPUT "user: ", "$(USER_TIME)"
        WRITE SYS$OUTPUT "system: ", "$(SYSTEM_TIME)"
        WRITE SYS$OUTPUT "clock: ", "$(CLOCK_TIME)"

        PIPE WRITE SYS$OUTPUT "user: ", "$(USER_TIME_SECONDS)" | TYPE SYS$INPUT /OUT=$(<:W)
        PIPE WRITE SYS$OUTPUT "system: ", "$(SYSTEM_TIME_SECONDS)" | APPEND /NEW SYS$INPUT $(<:W)
        PIPE WRITE SYS$OUTPUT "clock: ", "$(CLOCK_TIME_SECONDS)" | APPEND /NEW SYS$INPUT $(<:W)
    }
}