# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of Intel Corporation nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# The main logic behind running autotests in parallel

from __future__ import print_function

import StringIO
import csv
import multiprocessing
import pexpect
import re
import subprocess
import sys
import time

# wait for prompt


def wait_prompt(child):
    try:
        child.sendline()
        result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
                              timeout=120)
    except:
        return False
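    # expect() returns the index of the pattern that matched, so index 0
    # means the "RTE>>" prompt was seen before timing out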
    return result == 0

# run a test group
# each result tuple in the results list consists of:
#   result value (0 or -1)
#   result string
#   test name
#   total test run time (double)
#   raw test log
#   test report (if not available, should be None)
#
# this function needs to be outside the AutotestRunner class because
# multiprocessing.Pool cannot pickle bound methods, so dispatching an
# instance method to worker processes would not work (or would require
# quite a bit of effort to make it work).
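#
# the test_group dict is expected to look roughly like this (an
# illustrative sketch, reconstructed from the keys used below):
#
#   {
#       "Prefix": "group1",      # unique prefix, used for --file-prefix
#       "Memory": "2,2",         # per-socket memory to use, in MB
#       "Tests": [
#           {
#               "Name": "Mempool autotest",
#               "Command": "mempool_autotest",
#               "Func": mempool_autotest,  # callable(child, command)
#               "Report": None,            # optional callable(log)
#           },
#       ],
#   }
#
# a single result tuple might then look like:
#   (0, "Success", "Mempool autotest", 1.23, "<raw test log>", None)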


def run_test_group(cmdline, test_group):
    results = []
    child = None
    start_time = time.time()
    startuplog = None

    # run test app
    try:
        # prepare logging of init
        startuplog = StringIO.StringIO()

        print("\n%s %s\n" % ("=" * 20, test_group["Prefix"]),
              file=startuplog)
        print("\ncmdline=%s" % cmdline, file=startuplog)

        child = pexpect.spawn(cmdline, logfile=startuplog)
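        # pexpect copies the session's input and output into the logfile
        # object, so everything the app prints at startup is captured in
        # startuplog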

        # wait for target to boot
        if not wait_prompt(child):
            child.close()

            results.append((-1,
                            "Fail [No prompt]",
                            "Start %s" % test_group["Prefix"],
                            time.time() - start_time,
                            startuplog.getvalue(),
                            None))

            # mark all tests as failed
            for test in test_group["Tests"]:
                results.append((-1, "Fail [No prompt]", test["Name"],
                                time.time() - start_time, "", None))
            # exit test
            return results

    except:
        results.append((-1,
                        "Fail [Can't run]",
                        "Start %s" % test_group["Prefix"],
                        time.time() - start_time,
                        # guard against the corner case where the failure
                        # happened before the log buffer was even created
                        startuplog.getvalue() if startuplog else "",
                        None))

        # mark all tests as failed
        for t in test_group["Tests"]:
            results.append((-1, "Fail [Can't run]", t["Name"],
                            time.time() - start_time, "", None))
        # exit test
        return results

    # startup was successful
    results.append((0, "Success", "Start %s" % test_group["Prefix"],
                    time.time() - start_time, startuplog.getvalue(), None))

    # parse the binary for available test commands
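    # note: DPDK test commands are registered via the REGISTER_TEST_COMMAND
    # macro, whose constructor shows up in the symbol table as
    # "test_register_<command>"; if the binary is stripped, those symbols
    # are gone, so every command is optimistically assumed to be available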
    binary = cmdline.split()[0]
    stripped = 'not stripped' not in subprocess.check_output(['file', binary])
    if not stripped:
        symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
        avail_cmds = re.findall(r'test_register_(\w+)', symbols)

    # run all tests in test group
    for test in test_group["Tests"]:

        # create a log buffer for each test. in a multiprocessing
        # environment, logging from different processes would otherwise
        # be interleaved and create a mess, hence the buffering
        logfile = StringIO.StringIO()
        child.logfile = logfile

        result = ()

        # make a note of when the test started
        start_time = time.time()

        try:
            # print test name to log buffer
            print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)

            # run test function associated with the test
            if stripped or test["Command"] in avail_cmds:
                result = test["Func"](child, test["Command"])
            else:
                result = (0, "Skipped [Not Available]")

            # make a note of when the test finished
            end_time = time.time()

            # append test data to the result tuple
            result += (test["Name"], end_time - start_time,
                       logfile.getvalue())

            # call the report function, if one is defined, and supply it
            # with the complete log of the test run (this module-level
            # function has no access to the runner instance, so the log
            # is all the callback gets)
            if test["Report"]:
                report = test["Report"](logfile.getvalue())

                # append report to results tuple
                result += (report,)
            else:
                # report is None
                result += (None,)
        except:
            # make a note of when the test crashed
            end_time = time.time()

            # mark test as failed
            result = (-1, "Fail [Crash]", test["Name"],
                      end_time - start_time, logfile.getvalue(), None)
        finally:
            # append the results to the results list
            results.append(result)

    # regardless of whether the test crashed, try quitting the app
    try:
        child.sendline("quit")
        child.close()
    # if the app crashed, just do nothing instead
    except:
        # nop
        pass

    # return test results
    return results


# class representing an instance of autotests run
class AutotestRunner:

    def __init__(self, cmdline, target, blacklist, whitelist):
        self.cmdline = cmdline
        self.target = target
        self.blacklist = blacklist
        self.whitelist = whitelist

        # mutable state is set up per instance; defining these as class
        # attributes would share the same lists between all instances
        self.parallel_test_groups = []
        self.non_parallel_test_groups = []
        self.start = None
        self.n_tests = 0
        self.fails = 0
        self.log_buffers = []

        # log and csv file names
        logfile = "%s.log" % target
        csvfile = "%s.csv" % target

        self.logfile = open(logfile, "w")
        csvfile = open(csvfile, "w")
        self.csvwriter = csv.writer(csvfile)

        # prepare results table
        self.csvwriter.writerow(["test_name", "test_result", "result_str"])

    # set up cmdline string
    def __get_cmdline(self, test_group):
        cmdline = self.cmdline

        # append memory limitations for each test group,
        # otherwise tests won't run in parallel
        if "i686" not in self.target:
            cmdline += " --socket-mem=%s" % test_group["Memory"]
        else:
            # affinitize startup so that tests don't fail on i686
            cmdline = "taskset 1 " + cmdline
            mem = sum(map(int, test_group["Memory"].split(",")))
            cmdline += " -m " + str(mem)

        # set group prefix for the autotest group,
        # otherwise the groups won't run in parallel
        cmdline += " --file-prefix=%s" % test_group["Prefix"]
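
        # the resulting command line looks something like this
        # (hypothetical group "group1" with 2 MB per NUMA socket):
        #   ./test --socket-mem=2,2 --file-prefix=group1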

        return cmdline

    def add_parallel_test_group(self, test_group):
        self.parallel_test_groups.append(test_group)

    def add_non_parallel_test_group(self, test_group):
        self.non_parallel_test_groups.append(test_group)

    def __process_results(self, results):
        # this iterates over individual test results
        for i, result in enumerate(results):

            # increase total number of tests that were run
            # do not include "start" test
            if i > 0:
                self.n_tests += 1

            # unpack result tuple
            test_result, result_str, test_name, \
                test_time, log, report = result

            # get total run time
            cur_time = time.time()
            total_time = int(cur_time - self.start)

            # print results, test run time and total time since start
            result = ("%s:" % test_name).ljust(30)
            result += result_str.ljust(29)
            result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)

            # don't print out total time on every line, it's the same anyway
            if i == len(results) - 1:
                print(result,
                      "[%02dm %02ds]" % (total_time / 60, total_time % 60))
            else:
                print(result)

            # if test failed and it wasn't a "start" test
            if test_result < 0 and i != 0:
                self.fails += 1

            # collect logs
            self.log_buffers.append(log)

            # write the test report to a file, if one was generated
            if report:
                try:
                    f = open("%s_%s_report.rst" %
                             (self.target, test_name), "w")
                except IOError:
                    print("Report for %s could not be created!" % test_name)
                else:
                    with f:
                        f.write(report)

            # write test result to CSV file
            if i != 0:
                self.csvwriter.writerow([test_name, test_result, result_str])

    # this function iterates over test groups and removes tests that are
    # blacklisted or not whitelisted
    def __filter_groups(self, test_groups):
        groups_to_remove = []

        # filter out tests from parallel test groups
        for i, test_group in enumerate(test_groups):

            # iterate over a copy so that we can safely delete individual
            # tests
            for test in test_group["Tests"][:]:
                test_id = test["Command"]

                # dump tests are specified in full, e.g. "Dump_mempool",
                # while other tests are matched with their "_autotest"
                # suffix stripped
                if "_autotest" in test_id:
                    test_id = test_id[:-len("_autotest")]

                # filter out blacklisted/whitelisted tests
                if self.blacklist and test_id in self.blacklist:
                    test_group["Tests"].remove(test)
                    continue
                if self.whitelist and test_id not in self.whitelist:
                    test_group["Tests"].remove(test)
                    continue

            # modify or remove the original group
            if len(test_group["Tests"]) > 0:
                test_groups[i] = test_group
            else:
                # remember which groups should be deleted
                # put the numbers in backwards so that we start
                # deleting from the end, not from the beginning
                groups_to_remove.insert(0, i)

        # remove test groups that need to be removed
        for i in groups_to_remove:
            del test_groups[i]

        return test_groups

    # iterate over test groups and run tests associated with them
    def run_all_tests(self):
        # filter groups
        self.parallel_test_groups = \
            self.__filter_groups(self.parallel_test_groups)
        self.non_parallel_test_groups = \
            self.__filter_groups(self.non_parallel_test_groups)

        # create a pool of worker processes
        pool = multiprocessing.Pool(processes=1)
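        # note: with processes=1, the "parallel" groups are in fact
        # executed one at a time; the pool is still useful because the
        # main process can poll group results asynchronously below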

        results = []

        # whatever happens, try to save as many logs as possible
        try:

            # create table header
            print("")
            print("Test name".ljust(30), "Test result".ljust(29),
                  "Test".center(9), "Total".center(9))
            print("=" * 80)

            # make a note of the tests' start time
            self.start = time.time()

            # assign worker processes to run test groups
            for test_group in self.parallel_test_groups:
                result = pool.apply_async(run_test_group,
                                          [self.__get_cmdline(test_group),
                                           test_group])
                results.append(result)

            # iterate while we have group execution results to get
            while len(results) > 0:

                # iterate over a copy to be able to safely delete results
                # this iterates over a list of group results
                for group_result in results[:]:

                    # if the group hasn't finished yet, continue
                    if not group_result.ready():
                        continue

                    res = group_result.get()

                    self.__process_results(res)

                    # remove result from results list once we're done with it
                    results.remove(group_result)

            # run non-parallel tests. they are run one by one, synchronously
            for test_group in self.non_parallel_test_groups:
                group_result = run_test_group(
                    self.__get_cmdline(test_group), test_group)

                self.__process_results(group_result)

            # get total run time
            cur_time = time.time()
            total_time = int(cur_time - self.start)

            # print out summary
            print("=" * 80)
            print("Total run time: %02dm %02ds" % (total_time / 60,
                                                   total_time % 60))
            if self.fails != 0:
                print("Number of failed tests: %s" % str(self.fails))

            # write summary to logfile
            self.logfile.write("Summary\n")
            self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
            self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
            self.logfile.write("Failed tests: ".ljust(15) +
                               "%i\n" % self.fails)
        except:
            print("Exception occurred")
            print(sys.exc_info())
            self.fails = 1

        # drop logs from all executions into the logfile
        for buf in self.log_buffers:
            self.logfile.write(buf.replace("\r", ""))

        return self.fails
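

# typical usage, as an illustrative sketch (the real driver script is
# expected to build the test group lists from a per-target configuration;
# the names below are hypothetical):
#
#   runner = AutotestRunner(cmdline, target, blacklist, whitelist)
#   for group in parallel_test_group_list:
#       runner.add_parallel_test_group(group)
#   for group in non_parallel_test_group_list:
#       runner.add_non_parallel_test_group(group)
#   sys.exit(runner.run_all_tests())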