#!/usr/bin/python

# Copyright 2002-2005 Dave Abrahams.
# Copyright 2002-2006 Vladimir Prus.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

import BoostBuild

import os
import os.path
import sys

xml = "--xml" in sys.argv
toolset = BoostBuild.get_toolset()

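# Typical invocations (illustrative only; the file name test_all.py is assumed,
# and exactly how the toolset argument is interpreted is up to
# BoostBuild.get_toolset() and the local configuration):
#
#   python test_all.py                  # use the default toolset
#   python test_all.py gcc              # run the tests using gcc
#   python test_all.py --xml gcc        # emit Boost regression <test-log> XML
#   python test_all.py --extras gcc     # also run the optional extra tests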

# Clear environment for testing.
#
for s in ("BOOST_ROOT", "BOOST_BUILD_PATH", "JAM_TOOLSET", "BCCROOT",
    "MSVCDir", "MSVC", "MSVCNT", "MINGW", "watcom"):
    try:
        del os.environ[s]
    except KeyError:
        pass

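# Deferring annotations lets run_tests() below flush them only for failing
# tests (BoostBuild.flush_annotations(1)) and drop them otherwise
# (BoostBuild.clear_annotations()).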
BoostBuild.set_defer_annotations(1)


def run_tests(critical_tests, other_tests):
    """
    Runs the critical_tests first and then the other_tests.

    Writes the name of the first failed test to test_results.txt. Critical
    tests are run in the specified order; the other tests are run starting
    with the one that failed first on the last test run.
    """
    last_failed = last_failed_test()
    other_tests = reorder_tests(other_tests, last_failed)
    all_tests = critical_tests + other_tests

    invocation_dir = os.getcwd()
    max_test_name_len = 10
    for x in all_tests:
        if len(x) > max_test_name_len:
            max_test_name_len = len(x)

    pass_count = 0
    failures_count = 0

    for test in all_tests:
        if not xml:
            print("%%-%ds :" % max_test_name_len % test),

        passed = 0
        try:
            __import__(test)
            passed = 1
        except KeyboardInterrupt:
            # This allows us to abort the testing manually using Ctrl-C.
            raise
        except SystemExit:
            # This is the regular way our test scripts are supposed to report
            # test failures.
            pass
        except:
            exc_type, exc_value, exc_tb = sys.exc_info()
            try:
                BoostBuild.annotation("failure - unhandled exception", "%s - "
                    "%s" % (exc_type.__name__, exc_value))
                BoostBuild.annotate_stack_trace(exc_tb)
            finally:
                # Explicitly clear a hard-to-garbage-collect traceback related
                # reference cycle as per documented sys.exc_info() usage
                # suggestion.
                del exc_tb

        if passed:
            pass_count += 1
        else:
            failures_count += 1
            if failures_count == 1:
                f = open(os.path.join(invocation_dir, "test_results.txt"), "w")
                try:
                    f.write(test)
                finally:
                    f.close()

        # Restore the current directory, which might have been changed by the
        # test.
        os.chdir(invocation_dir)

        if not xml:
            if passed:
                print("PASSED")
            else:
                print("FAILED")
        else:
            rs = "succeed"
            if not passed:
                rs = "fail"
            print """
<test-log library="build" test-name="%s" test-type="run" toolset="%s" test-program="%s" target-directory="%s">
<run result="%s">""" % (test, toolset, "tools/build/v2/test/" + test + ".py",
                "boost/bin.v2/boost.build.tests/" + toolset + "/" + test, rs)
            if not passed:
                BoostBuild.flush_annotations(1)
            print """
</run>
</test-log>
"""
        sys.stdout.flush()  # Makes testing under emacs more entertaining.
        BoostBuild.clear_annotations()

    # Erase the file on success.
    if failures_count == 0:
        open("test_results.txt", "w").close()

    if not xml:
        print """
=== Test summary ===
PASS: %d
FAIL: %d
""" % (pass_count, failures_count)

    # Exit with a non-zero status if any test failed.
    if failures_count > 0:
        sys.exit(1)


def last_failed_test():
    "Returns the name of the last failed test or None."
    try:
        f = open("test_results.txt")
        try:
            return f.read().strip()
        finally:
            f.close()
    except Exception:
        return None


def reorder_tests(tests, first_test):
    """
    Moves first_test to the front of the given test list, e.g.
    reorder_tests(["a", "b", "c"], "b") gives ["b", "a", "c"]. The list is
    returned unchanged if first_test is not in it (e.g. when there is no
    recorded last failure).
    """
    try:
        n = tests.index(first_test)
        return [first_test] + tests[:n] + tests[n + 1:]
    except ValueError:
        return tests


critical_tests = ["unit_tests", "module_actions", "startup_v2", "core_d12",
    "core_typecheck", "core_delete_module", "core_language", "core_arguments",
    "core_varnames", "core_import_module"]

# We want to collect debug information about the test site before running any
# of the tests, but only when the tests are not being run interactively. When
# running interactively the user can easily run this always-failing test
# directly to see what it would have returned, and it does not get a chance to
# spoil a possible 'all tests passed' result.
if xml:
    critical_tests.insert(0, "collect_debug_info")

tests = ["absolute_sources",
         "alias",
         "alternatives",
         "bad_dirname",
         "build_dir",
         "build_file",
         "build_no",
         "builtin_echo",
         "builtin_exit",
         "builtin_glob",
         "builtin_split_by_characters",
         "bzip2",
         "c_file",
         "chain",
         "clean",
         "composite",
         "conditionals",
         "conditionals2",
         "conditionals3",
         "conditionals_multiple",
         "configuration",
         "copy_time",
         "core_action_output",
         "core_action_status",
         "core_actions_quietly",
         "core_at_file",
         "core_bindrule",
         "core_jamshell",
         "core_multifile_actions",
         "core_nt_cmd_line",
         "core_option_d2",
         "core_option_l",
         "core_option_n",
         "core_parallel_actions",
         "core_parallel_multifile_actions_1",
         "core_parallel_multifile_actions_2",
         "core_source_line_tracking",
         "core_update_now",
         "core_variables_in_actions",
         "custom_generator",
         "debugger",
         "debugger-mi",
         "default_build",
         "default_features",
         # This test is known to be broken itself.
         # "default_toolset",
         "dependency_property",
         "dependency_test",
         "direct_request_test",
         "disambiguation",
         "dll_path",
         "double_loading",
         "duplicate",
         "example_libraries",
         "example_make",
         "exit_status",
         "expansion",
         "explicit",
         "feature_cxxflags",
         "free_features_request",
         "generator_selection",
         "generators_test",
         "implicit_dependency",
         "indirect_conditional",
         "inherit_toolset",
         "inherited_dependency",
         "inline",
         "lib_source_property",
         "library_chain",
         "library_property",
         "link",
         "load_order",
         "loop",
         "make_rule",
         "message",
         "ndebug",
         "no_type",
         "notfile",
         "ordered_include",
         "out_of_tree",
         "path_features",
         "prebuilt",
         "print",
         "project_dependencies",
         "project_glob",
         "project_id",
         "project_root_constants",
         "project_root_rule",
         "project_test3",
         "project_test4",
         "property_expansion",
         "rebuilds",
         "regression",
         "relative_sources",
         "remove_requirement",
         "rescan_header",
         "resolution",
         "scanner_causing_rebuilds",
         "searched_lib",
         "skipping",
         "sort_rule",
         "source_locations",
         "source_order",
         "space_in_path",
         "stage",
         "standalone",
         "static_and_shared_library",
         "suffix",
         "tag",
         "test_result_dumping",
         "test_rc",
         "testing_support",
         "timedata",
         "toolset_requirements",
         "unit_test",
         "unused",
         "use_requirements",
         "using",
         "wrapper",
         "wrong_project",
         "zlib"
         ]

if os.name == "posix":
    tests.append("symlink")
    # On Windows, library order is not important, so skip this test. Besides,
    # it fails ;-). Further, the test relies on the fact that on Linux, one
    # can build a shared library with unresolved symbols. This is not true on
    # Windows, even with cygwin gcc.

    # Disable this test until we figure out how to address failures due to
    # --as-needed being the default now.
    # if "CYGWIN" not in os.uname()[0]:
    #     tests.append("library_order")

if toolset.startswith("gcc"):
    tests.append("gcc_runtime")

if toolset.startswith("gcc") or toolset.startswith("msvc"):
    tests.append("pch")

# Disable on OSX as it doesn't seem to work for unknown reasons.
if sys.platform != 'darwin':
    tests.append("builtin_glob_archive")

if "--extras" in sys.argv:
    tests.append("boostbook")
    tests.append("qt4")
    tests.append("qt5")
    tests.append("example_qt4")
    # Requires ./whatever.py to work, so is not guaranteed to work everywhere.
    tests.append("example_customization")
    # Requires gettext tools.
    tests.append("example_gettext")
elif not xml:
    print("Note: skipping extra tests")

run_tests(critical_tests, tests)
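
# Illustrative (non --xml) console output; actual test names, padding and
# counts will differ:
#
#   unit_tests                 : PASSED
#   module_actions             : PASSED
#   alias                      : FAILED
#   ...
#
#   === Test summary ===
#   PASS: <number of passed tests>
#   FAIL: <number of failed tests>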