# Status: ported, except for --out-xml
# Base revision: 64488
#
# Copyright 2005 Dave Abrahams
# Copyright 2002, 2003, 2004, 2005, 2010 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)

# This module implements a regression testing framework. It declares a number
# of main target rules which perform some action and, if the results are OK,
# create an output file.
#
# The exact list of rules is:
# 'compile'      -- creates a .test file if compilation of sources was
#                   successful.
# 'compile-fail' -- creates a .test file if compilation of sources failed.
# 'run'          -- creates a .test file if running the executable produced
#                   from sources was successful. Also leaves behind a .output
#                   file with the output from the program run.
# 'run-fail'     -- same as above, but the .test file is created if running
#                   fails.
#
# In all cases, the presence of a .test file is an indication that the test
# passed. For more convenient reporting, you might want to use the C++ Boost
# regression testing utilities (see http://www.boost.org/more/regression.html).
#
# For historical reasons, a 'unit-test' rule is available which has the same
# syntax as 'exe' and behaves just like 'run'.

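# A minimal sketch of declaring a test through this module's Python API (the
# file and target names below are hypothetical; in practice these rules are
# usually invoked from Jamfiles via the project-rules registration performed
# later in this module):
#
#   import b2.tools.testing as testing
#   testing.run(["hello.cpp"], ["--verbose"], [], ["<define>TESTING"],
#               "hello_test")
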
# Things to do:
#  - Teach compiler_status to handle Jamfile.v2.
# Notes:
#  - <no-warn> is not implemented, since it is Como-specific, and it is not
#    clear how to implement it.
#  - std::locale-support is not implemented (it is used in one test).

import b2.build.feature as feature
import b2.build.type as type
import b2.build.targets as targets
import b2.build.generators as generators
import b2.build.toolset as toolset
import b2.tools.common as common
import b2.util.option as option
import b2.build_system as build_system

from b2.manager import get_manager
from b2.util import stem, bjam_signature, is_iterable_typed
from b2.util.sequence import unique

import bjam

import re
import os.path
import sys

def init():
    pass

# Feature controlling the command used to launch test programs.
feature.feature("testing.launcher", [], ["free", "optional"])

feature.feature("test-info", [], ["free", "incidental"])
feature.feature("testing.arg", [], ["free", "incidental"])
feature.feature("testing.input-file", [], ["free", "dependency"])

feature.feature("preserve-test-targets", ["on", "off"], ["incidental", "propagated"])

# Register target types.
type.register("TEST", ["test"])
type.register("COMPILE", [], "TEST")
type.register("COMPILE_FAIL", [], "TEST")

type.register("RUN_OUTPUT", ["run"])
type.register("RUN", [], "TEST")
type.register("RUN_FAIL", [], "TEST")

type.register("LINK", [], "TEST")
type.register("LINK_FAIL", [], "TEST")
type.register("UNIT_TEST", ["passed"], "TEST")

__all_tests = []

# Declare the rules which create main targets. While the 'type' module already
# creates rules with the same names for us, we need extra convenience: a
# default name for the main target. So we write our own versions.

# Helper rule. Creates a test target, using the basename of the first source
# if no target name is explicitly passed. Remembers the created target in a
# global variable.
def make_test(target_type, sources, requirements, target_name=None):
    assert isinstance(target_type, basestring)
    assert is_iterable_typed(sources, basestring)
    assert is_iterable_typed(requirements, basestring)
    assert isinstance(target_name, basestring) or target_name is None
    if not target_name:
        target_name = stem(os.path.basename(sources[0]))

    # Having periods (".") in the target name is problematic because the typed
    # generator will strip the suffix and use the bare name for the file
    # targets. Even though the location-prefix averts problems most of the
    # time, it does not prevent ambiguity issues when referring to the test
    # targets, for example when using the XML log output. So we rename the
    # target to remove the periods, and provide an alias for users.
    real_name = target_name.replace(".", "~")

    project = get_manager().projects().current()
    # The <location-prefix> forces the build system to generate paths in the
    # form '$build_dir/array1.test/gcc/debug'. This is necessary to allow
    # post-processing tools to work.
    t = get_manager().targets().create_typed_target(
        type.type_from_rule_name(target_type), project, real_name, sources,
        requirements + ["<location-prefix>" + real_name + ".test"], [], [])

    # The alias to the real target, per the period replacement above.
    if real_name != target_name:
        get_manager().projects().project_rules().rules["alias"](
            target_name, [t])

    # Remember the test (for --dump-tests). A good approach would be to
    # collect all tests for a given project. This has some technical problems:
    # e.g. we cannot call this dump from a Jamfile since projects referred to
    # by 'build-project' are not available until the whole Jamfile has been
    # loaded.
    __all_tests.append(t)
    return t

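# Naming sketch (hypothetical names): make_test("run", ["dir/foo.cpp"], [])
# derives the default target name "foo" from the first source, while an
# explicit target name such as "foo.v2" becomes the real target "foo~v2",
# with "foo.v2" registered as an alias for it.
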
# Note: passing more than one cpp file here is known to fail. Passing a cpp
# file and a library target works.
#
@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def compile(sources, requirements, target_name=None):
    return make_test("compile", sources, requirements, target_name)

@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def compile_fail(sources, requirements, target_name=None):
    return make_test("compile-fail", sources, requirements, target_name)

@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def link(sources, requirements, target_name=None):
    return make_test("link", sources, requirements, target_name)

@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def link_fail(sources, requirements, target_name=None):
    return make_test("link-fail", sources, requirements, target_name)

def handle_input_files(input_files):
    if len(input_files) > 1:
        # Check that the sorting done when creating a property-set instance
        # will not change the ordering.
        if sorted(input_files) != input_files:
            get_manager().errors()("Names of input files must be sorted alphabetically\n" +
                                   "due to internal limitations")
    return ["<testing.input-file>" + f for f in input_files]

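# A sketch of the transformation (hypothetical file names): passing
# ["a.txt", "b.txt"] yields ["<testing.input-file>a.txt",
# "<testing.input-file>b.txt"], while ["b.txt", "a.txt"] is rejected with
# the error above.
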
@bjam_signature((["sources", "*"], ["args", "*"], ["input_files", "*"],
                 ["requirements", "*"], ["target_name", "?"],
                 ["default_build", "*"]))
def run(sources, args, input_files, requirements, target_name=None, default_build=[]):
    if args:
        requirements.append("<testing.arg>" + " ".join(args))
    requirements.extend(handle_input_files(input_files))
    return make_test("run", sources, requirements, target_name)

@bjam_signature((["sources", "*"], ["args", "*"], ["input_files", "*"],
                 ["requirements", "*"], ["target_name", "?"],
                 ["default_build", "*"]))
def run_fail(sources, args, input_files, requirements, target_name=None, default_build=[]):
    if args:
        requirements.append("<testing.arg>" + " ".join(args))
    requirements.extend(handle_input_files(input_files))
    return make_test("run-fail", sources, requirements, target_name)

# Register all the rules.
for name in ["compile", "compile-fail", "link", "link-fail", "run", "run-fail"]:
    get_manager().projects().add_rule(name, getattr(sys.modules[__name__], name.replace("-", "_")))

# Use 'test-suite' as a synonym for 'alias', for backward compatibility.
from b2.build.alias import alias
get_manager().projects().add_rule("test-suite", alias)

# For all main targets in 'project-module', which are typed targets with type
# derived from 'TEST', produce some interesting information.
#
def dump_tests():
    for t in __all_tests:
        dump_test(t)

# Given a project location in normalized form (slashes are forward), compute
# the name of the Boost library.
#
__ln1 = re.compile("/(tools|libs)/(.*)/(test|example)")
__ln2 = re.compile("/(tools|libs)/(.*)$")
__ln3 = re.compile("(/status$)")
def get_library_name(path):
    assert isinstance(path, basestring)

    path = path.replace("\\", "/")
    # These patterns can occur anywhere inside an absolute path, so search for
    # them rather than matching from the start of the string.
    match1 = __ln1.search(path)
    match2 = __ln2.search(path)
    match3 = __ln3.search(path)

    if match1:
        return match1.group(2)
    elif match2:
        return match2.group(2)
    elif match3:
        return ""
    elif option.get("dump-tests", False, True):
        # The 'run' rule and others might be used outside Boost. In that case,
        # just return the path, since the 'library name' makes no sense.
        return path

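# For example (hypothetical path): a location such as
# "/home/user/boost/libs/array/test" yields the library name "array", while a
# location ending in "/status" yields the empty string.
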
# Was an XML dump requested?
__out_xml = option.get("out-xml", False, True)

# Takes a target (an instance of 'basic-target') and prints
# - its type
# - its name
# - comments specified via the <test-info> property
# - the relative location of all sources from the project root.
#
def dump_test(target):
    assert isinstance(target, targets.AbstractTarget)
    type = target.type()
    name = target.name()
    project = target.project()

    project_root = project.get('project-root')
    library = get_library_name(os.path.abspath(project.get('location')))
    if library:
        name = library + "/" + name

    sources = target.sources()
    source_files = []
    for s in sources:
        if isinstance(s, targets.FileReference):
            location = os.path.abspath(os.path.join(s.location(), s.name()))
            source_files.append(os.path.relpath(location, os.path.abspath(project_root)))

    target_name = project.get('location') + "//" + target.name() + ".test"

    test_info = target.requirements().get('test-info')
    test_info = " ".join('"' + ti + '"' for ti in test_info)

    # If the user requested XML output on the command line, add the test info
    # to that XML file rather than dumping it to stdout.
    # (Not yet ported; the original Jam code follows.)
    #if $(.out-xml)
    #{
    #    local nl = "
    #" ;
    #    .contents on $(.out-xml) +=
    #        "$(nl)  <test type=\"$(type)\" name=\"$(name)\">"
    #        "$(nl)    <target><![CDATA[$(target-name)]]></target>"
    #        "$(nl)    <info><![CDATA[$(test-info)]]></info>"
    #        "$(nl)    <source><![CDATA[$(source-files)]]></source>"
    #        "$(nl)  </test>"
    #        ;
    #}
    #else

    source_files = " ".join('"' + s + '"' for s in source_files)
    if test_info:
        print 'boost-test(%s) "%s" [%s] : %s' % (type, name, test_info, source_files)
    else:
        print 'boost-test(%s) "%s" : %s' % (type, name, source_files)

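# A sketch of the resulting stdout line for a test without <test-info>
# (library and file names are hypothetical):
#
#   boost-test(RUN) "array/array1" : "libs/array/test/array1.cpp"
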
# Register generators. Depending on the target type, either the
# 'expect-success' or the 'expect-failure' rule will be used.
generators.register_standard("testing.expect-success", ["OBJ"], ["COMPILE"])
generators.register_standard("testing.expect-failure", ["OBJ"], ["COMPILE_FAIL"])
generators.register_standard("testing.expect-success", ["RUN_OUTPUT"], ["RUN"])
generators.register_standard("testing.expect-failure", ["RUN_OUTPUT"], ["RUN_FAIL"])
generators.register_standard("testing.expect-success", ["EXE"], ["LINK"])
generators.register_standard("testing.expect-failure", ["EXE"], ["LINK_FAIL"])

# Generator which runs an EXE and captures its output.
generators.register_standard("testing.capture-output", ["EXE"], ["RUN_OUTPUT"])

# Generator which creates a target if the sources run successfully. Differs
# from RUN in that run output is not captured. The reason it exists is that
# the 'run' rule is much better for automated testing, but is not
# user-friendly (see http://article.gmane.org/gmane.comp.lib.boost.build/6353).
generators.register_standard("testing.unit-test", ["EXE"], ["UNIT_TEST"])

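# For instance, a 'run' test is built by chaining the generators registered
# above: the EXE produced from the sources is run by 'testing.capture-output'
# to yield a RUN_OUTPUT target, which 'testing.expect-success' then turns
# into the final RUN (.test) target.
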
# FIXME: if those calls are after bjam.call, then bjam will crash
# when toolset.flags calls bjam.caller.
toolset.flags("testing.capture-output", "ARGS", [], ["<testing.arg>"])
toolset.flags("testing.capture-output", "INPUT_FILES", [], ["<testing.input-file>"])
toolset.flags("testing.capture-output", "LAUNCHER", [], ["<testing.launcher>"])

toolset.flags("testing.unit-test", "LAUNCHER", [], ["<testing.launcher>"])
toolset.flags("testing.unit-test", "ARGS", [], ["<testing.arg>"])

# This is a composing generator to support cases where a generator for the
# specified target constructs other targets as well. One such example is
# msvc's exe generator that constructs both EXE and PDB targets.
type.register("TIME", ["time"])
generators.register_composing("testing.time", [], ["TIME"])

# The following code sets up actions for this module. It's pretty convoluted,
# but the basic point is that most of the actions are defined by Jam code
# contained in testing-aux.jam, which we load into a Jam module named
# 'testing'.

def run_path_setup(target, sources, ps):
    if __debug__:
        from ..build.property_set import PropertySet
        assert is_iterable_typed(target, basestring) or isinstance(target, basestring)
        assert is_iterable_typed(sources, basestring)
        assert isinstance(ps, PropertySet)
    # For testing, we need to make sure that all dynamic libraries needed by
    # the test are found. So, we collect all paths from dependency libraries
    # (via the xdll-path property) and add whatever explicit dll-path the user
    # has specified. The resulting paths are added to the environment on each
    # test invocation.
    dll_paths = ps.get('dll-path')
    dll_paths.extend(ps.get('xdll-path'))
    dll_paths.extend(bjam.call("get-target-variable", sources, "RUN_PATH"))
    dll_paths = unique(dll_paths)
    if dll_paths:
        bjam.call("set-target-variable", target, "PATH_SETUP",
                  common.prepend_path_variable_command(
                      common.shared_library_path_variable(), dll_paths))

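# On Linux, for instance, shared_library_path_variable() is LD_LIBRARY_PATH,
# so PATH_SETUP ends up holding a shell command roughly of the form below
# (a sketch only; the exact formatting comes from b2.tools.common, and the
# library path is hypothetical):
#
#   LD_LIBRARY_PATH="bin/gcc/debug:$LD_LIBRARY_PATH"
#   export LD_LIBRARY_PATH
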
def capture_output_setup(target, sources, ps):
    if __debug__:
        from ..build.property_set import PropertySet
        assert is_iterable_typed(target, basestring)
        assert is_iterable_typed(sources, basestring)
        assert isinstance(ps, PropertySet)
    run_path_setup(target[0], sources, ps)

    if ps.get('preserve-test-targets') == ['off']:
        bjam.call("set-target-variable", target, "REMOVE_TEST_TARGETS", "1")

get_manager().engine().register_bjam_action("testing.capture-output",
                                            capture_output_setup)

path = os.path.dirname(__file__)
import b2.util.os_j
get_manager().projects().project_rules()._import_rule("testing", "os.name",
                                                      b2.util.os_j.name)
import b2.tools.common
get_manager().projects().project_rules()._import_rule("testing", "common.rm-command",
                                                      b2.tools.common.rm_command)
get_manager().projects().project_rules()._import_rule("testing", "common.file-creation-command",
                                                      b2.tools.common.file_creation_command)

bjam.call("load", "testing", os.path.join(path, "testing-aux.jam"))

for name in ["expect-success", "expect-failure", "time"]:
    get_manager().engine().register_bjam_action("testing." + name)

get_manager().engine().register_bjam_action("testing.unit-test",
                                            run_path_setup)

if option.get("dump-tests", False, True):
    build_system.add_pre_build_hook(dump_tests)