]>
Commit | Line | Data |
---|---|---|
594b1259 MW |
1 | #!/usr/bin/env python |
2 | ||
3 | # | |
4 | # topotest.py | |
5 | # Library of helper functions for NetDEF Topology Tests | |
6 | # | |
7 | # Copyright (c) 2016 by | |
8 | # Network Device Education Foundation, Inc. ("NetDEF") | |
9 | # | |
10 | # Permission to use, copy, modify, and/or distribute this software | |
11 | # for any purpose with or without fee is hereby granted, provided | |
12 | # that the above copyright notice and this permission notice appear | |
13 | # in all copies. | |
14 | # | |
15 | # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES | |
16 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
17 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR | |
18 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY | |
19 | # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, | |
20 | # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS | |
21 | # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | |
22 | # OF THIS SOFTWARE. | |
23 | # | |
24 | ||
49581587 | 25 | import difflib |
50c40bde | 26 | import errno |
fd858290 | 27 | import functools |
594b1259 | 28 | import glob |
49581587 CH |
29 | import json |
30 | import os | |
31 | import pdb | |
32 | import platform | |
33 | import re | |
34 | import resource | |
35 | import signal | |
594b1259 | 36 | import subprocess |
49581587 | 37 | import sys |
1fca63c1 | 38 | import tempfile |
570f25d8 | 39 | import time |
49581587 | 40 | from copy import deepcopy |
594b1259 | 41 | |
49581587 | 42 | import lib.topolog as topolog |
6c131bd3 RZ |
43 | from lib.topolog import logger |
44 | ||
04ce2b97 RZ |
45 | if sys.version_info[0] > 2: |
46 | import configparser | |
c8e5983d | 47 | from collections.abc import Mapping |
04ce2b97 RZ |
48 | else: |
49 | import ConfigParser as configparser | |
c8e5983d | 50 | from collections import Mapping |
04ce2b97 | 51 | |
49581587 CH |
52 | from lib import micronet |
53 | from lib.micronet_compat import Node | |
594b1259 | 54 | |
# Global storage for extra configuration options handed down from the test
# runner; starts out empty and is populated elsewhere.
g_extra_config = {}
a53c08bc | 57 | |
49581587 CH |
def get_logs_path(rundir):
    """Return the per-test log directory joined under *rundir*."""
    return os.path.join(rundir, topolog.get_test_logdir())
61 | ||
0b25370e | 62 | |
def gdb_core(obj, daemon, corefiles):
    """
    Extract a backtrace from the first core file in *corefiles*.

    Runs gdb in batch mode against the daemon binary, writes the backtrace
    to stderr and returns it.
    """
    batch_script = (
        "info threads",
        "bt full",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
    )
    # Each command becomes a "-ex <cmd>" argument pair for gdb.
    gdbcmds = []
    for command in batch_script:
        gdbcmds.extend(["-ex", command])

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
787e7624 | 91 | |
701a0192 | 92 | |
3668ed8d RZ |
class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        # One entry per error line.
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return bool(self.errors)

    def gen_report(self):
        """Return the report as a list of lines with a headline prepended."""
        return ["Generated JSON diff error report:", ""] + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )
7fe06d55 | 116 | |
da63d5b3 | 117 | |
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an
    error report suited to be read by a human eye.

    Returns the accumulator tuple ``(error_count, error_text)``.

    NOTE: may mutate d1/d2 (``del`` is used while matching Array elements);
    json_cmp() passes deepcopies for this reason.
    """

    def dump_json(v):
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        if isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        # bool must be tested before int/float: bool is a subclass of int in
        # Python, so the Number branch would otherwise shadow it.
        elif isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    # '*' is a wildcard: any d1 value matches; equal scalars also match.
    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        # Ordered Array comparison: elements must match pairwise.
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        # Unordered Array comparison: every d2 element must match some
        # (not yet consumed) d1 element.
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    # Compare copies so a failed trial match does not
                    # mutate the operands.
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        # Exact Object comparison: key sets must match both ways.
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        # Subset Object comparison: a None value in d2 asserts key absence.
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        # Type mismatch weighs double so closest-match ranking prefers
        # same-typed candidates.
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
a82e5f9a | 282 | |
849224d4 G |
283 | |
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. There are special
    parameters and notations explained below which can be used to cover rather unusual
    cases:

    * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
      order within JSON Arrays)
    * using 'null' (or 'None' in Python) as JSON Object value is checking for key
      absence in d1
    * using '*' as JSON Object value or Array value is checking for presence in d1
      without checking the values
    * using '__ordered__' as first element in a JSON Array in d2 will also check the
      order when it is compared to an Array in d1
    """

    # Deep copies: the report generator may mutate its operands.
    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if not errors_n:
        return None
    result = json_cmp_result()
    result.add_error(errors)
    return result
09e21b44 | 314 | |
a82e5f9a | 315 | |
5cffda18 RZ |
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    current = normalize_text(router.vtysh_cmd(cmd))
    return difflines(
        current,
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )
5cffda18 RZ |
326 | |
327 | ||
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    parsed = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(parsed, data, exact)
5cffda18 RZ |
334 | |
335 | ||
1fca63c1 RZ |
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    # functools.partial objects carry the wrapped callable's name.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Safety-check to avoid running topotests with very small wait/count
    # arguments.
    assert (
        wait * count >= 5
    ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
        count, wait
    )

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    while count > 0:
        result = func()
        if result == what:
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - start_time
                )
            )
            return (True, result)
        time.sleep(wait)
        count -= 1

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - start_time)
    )
    return (False, result)
394 | ||
395 | ||
a6fd124a RZ |
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    # functools.partial objects carry the wrapped callable's name.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Just a safety-check to avoid running topotests with very
    # small wait/count arguments.
    assert (
        wait * count >= 5
    ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
        count, wait
    )

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    while count > 0:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
            time.sleep(wait)
            count -= 1
            continue

        # Optional value check, meaningless when the expected type is None.
        # Identity tests ("is not None") instead of "!= None": comparing
        # with equality operators misbehaves for objects overriding __eq__.
        if etype is not type(None) and avalue is not None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
            time.sleep(wait)
            count -= 1
            continue

        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, time.time() - start_time
            )
        )
        return (True, result)

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - start_time)
    )
    return (False, result)
460 | ||
461 | ||
1375385a CH |
def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents. Retry by default for 10 seconds
    """

    def test_func():
        # Polled by run_and_expect until it returns None (i.e. a match).
        return router_json_cmp(router, cmd, data, exact)

    success, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
    return success
473 | ||
474 | ||
594b1259 MW |
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        # Hex digits without the '0x' prefix, left-padded to 16 characters.
        digits = hex(dpid)[2:]
        return digits.rjust(16, "0")
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
488 | ||
594b1259 | 489 | |
50c40bde MW |
def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    # Reap the process first in case it is one of our zombie children;
    # otherwise kill(pid, 0) would still report it as alive.
    try:
        os.waitpid(pid, os.WNOHANG)
    except OSError:
        # ECHILD: not our child - fine, fall through to the signal probe.
        # (Was a bare "except:", which also swallowed KeyboardInterrupt.)
        pass
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
514 | ||
787e7624 | 515 | |
def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    delta = difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    # Join, then drop empty lines to clean up line endings.
    return os.linesep.join(s for s in "\n".join(delta).splitlines() if s)
525 | ||
787e7624 | 526 | |
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def _as_lines(blob):
        # Normalize trailing whitespace, then split keeping line endings.
        return ("\n".join(blob.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(_as_lines(text1), _as_lines(text2), title1, title2, **opts)
1fca63c1 | 532 | |
787e7624 | 533 | |
1fca63c1 RZ |
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.

    `content` may be a string or a list/tuple of lines (joined with newlines).
    """
    if isinstance(content, (list, tuple)):
        content = "\n".join(content)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as handle:
        handle.write(content)
        return handle.name
545 | ||
787e7624 | 546 | |
f7840f6b RZ |
def normalize_text(text):
    """
    Strips formating spaces/tabs, carriage returns and trailing whitespace.
    """
    # Collapse runs of spaces/tabs first, then drop carriage returns
    # (order matters: \r breaks a run of blanks into two).
    text = re.sub(r"[ \t]+", " ", text)
    text = re.sub(r"\r", "", text)

    # Remove whitespace before line breaks, then trailing whitespace.
    text = re.sub(r"[ \t]+\n", "\n", text)
    return text.rstrip()
560 | ||
787e7624 | 561 | |
0414a764 DS |
def is_linux():
    """
    Parses unix name output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """
    return os.uname()[0] == "Linux"
572 | ||
573 | ||
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if not is_linux():
        return False
    try:
        subp = subprocess.Popen(
            ["ip", "route", "show", "vrf"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE,
        )
        # First word of the first stderr line.
        # NOTE(review): communicate() yields bytes here (no text mode), so
        # this comparison with the str "Error:" is always unequal on
        # Python 3 - confirm the intended semantics before changing it.
        first_word = subp.communicate()[1].splitlines()[0].split()[0]
        if first_word != "Error:":
            return True
    except Exception:
        # Any failure (e.g. empty stderr, missing 'ip') counts as incapable.
        pass
    return False
597 | ||
598 | ||
cc95fbd9 | 599 | def module_present_linux(module, load): |
f2d6ce41 CF |
600 | """ |
601 | Returns whether `module` is present. | |
602 | ||
603 | If `load` is true, it will try to load it via modprobe. | |
604 | """ | |
787e7624 | 605 | with open("/proc/modules", "r") as modules_file: |
606 | if module.replace("-", "_") in modules_file.read(): | |
f2d6ce41 | 607 | return True |
787e7624 | 608 | cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module) |
f2d6ce41 CF |
609 | if os.system(cmd) != 0: |
610 | return False | |
611 | else: | |
612 | return True | |
613 | ||
787e7624 | 614 | |
cc95fbd9 DS |
def module_present_freebsd(module, load):
    """FreeBSD stub: kernel modules are assumed to always be available."""
    return True
617 | ||
787e7624 | 618 | |
cc95fbd9 DS |
def module_present(module, load=True):
    """Dispatch the kernel-module presence check to the platform helper."""
    plat = sys.platform
    if plat.startswith("linux"):
        return module_present_linux(module, load)
    if plat.startswith("freebsd"):
        return module_present_freebsd(module, load)
cc95fbd9 | 624 | |
787e7624 | 625 | |
4190fe1e RZ |
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got a invalid version string")

    # Numeric components; missing trailing components count as zero,
    # so e.g. "1.0" compares equal to "1".
    v1c = [int(part) for part in v1m.group("whole").split(".")]
    v2c = [int(part) for part in v2m.group("whole").split(".")]
    width = max(len(v1c), len(v2c))
    v1c += [0] * (width - len(v1c))
    v2c += [0] * (width - len(v2c))

    # Lexicographic list comparison == component-wise numeric comparison.
    if v1c > v2c:
        return 1
    if v1c < v2c:
        return -1
    return 0
680 | ||
787e7624 | 681 | |
f5612168 PG |
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    """
    Shut down (ifaceaction=False) or bring up (ifaceaction=True) interface
    *ifacename* on *node* via vtysh, optionally inside VRF *vrf_name*.
    """
    action = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, action
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, action
            )
        )
    node.run(cmd)
698 | ||
787e7624 | 699 | |
b220b3c8 PG |
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ip route")
    else:
        raw = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
    # Mask out timestamps so outputs stay comparable across runs.
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Drop everything up to and including the legend/header block
    # (its last line contains "o - offload failure").
    lines = masked.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]
    return "\n".join(lines)
718 | ||
b220b3c8 | 719 | |
e394d9aa MS |
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """

    if vrf_name is None:
        raw = node.vtysh_cmd("show ipv6 route")
    else:
        raw = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)
    # Mask out the link-local addresses
    masked = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", masked)

    # Drop everything up to and including the legend/header block.
    lines = masked.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]

    return "\n".join(lines)
e394d9aa MS |
745 | |
746 | ||
2f726781 MW |
def proto_name_to_number(protocol):
    """Translate a routing protocol name to its route-table number string;
    unknown names are returned unchanged."""
    translation = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
        "ospf6": "197",
    }
    # default return same as input
    return translation.get(protocol, protocol)
2f726781 MW |
764 | |
765 | ||
99a7a912 RZ |
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip route")).splitlines():
        columns = line.split(" ")
        # First column is the destination prefix; the rest are
        # keyword/value pairs.
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            elif prev == "via":
                route["via"] = column
            elif prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            elif prev == "metric":
                route["metric"] = column
            elif prev == "scope":
                route["scope"] = column
            prev = column

    return result
805 | ||
787e7624 | 806 | |
9375b5aa | 807 | def ip4_vrf_route(node): |
808 | """ | |
809 | Gets a structured return of the command 'ip route show vrf {0}-cust1'. | |
4563e204 | 810 | It can be used in conjunction with json_cmp() to provide accurate assert explanations. |
9375b5aa | 811 | |
812 | Return example: | |
813 | { | |
814 | '10.0.1.0/24': { | |
815 | 'dev': 'eth0', | |
816 | 'via': '172.16.0.1', | |
817 | 'proto': '188', | |
818 | }, | |
819 | '10.0.2.0/24': { | |
820 | 'dev': 'eth1', | |
821 | 'proto': 'kernel', | |
822 | } | |
823 | } | |
824 | """ | |
825 | output = normalize_text( | |
701a0192 | 826 | node.run("ip route show vrf {0}-cust1".format(node.name)) |
827 | ).splitlines() | |
9375b5aa | 828 | |
829 | result = {} | |
830 | for line in output: | |
831 | columns = line.split(" ") | |
832 | route = result[columns[0]] = {} | |
833 | prev = None | |
834 | for column in columns: | |
835 | if prev == "dev": | |
836 | route["dev"] = column | |
837 | if prev == "via": | |
838 | route["via"] = column | |
839 | if prev == "proto": | |
840 | # translate protocol names back to numbers | |
841 | route["proto"] = proto_name_to_number(column) | |
842 | if prev == "metric": | |
843 | route["metric"] = column | |
844 | if prev == "scope": | |
845 | route["scope"] = column | |
846 | prev = column | |
847 | ||
848 | return result | |
849 | ||
850 | ||
99a7a912 RZ |
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip -6 route")).splitlines():
        columns = line.split(" ")
        # First column is the destination prefix; the rest are
        # keyword/value pairs.
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            elif prev == "via":
                route["via"] = column
            elif prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            elif prev == "metric":
                route["metric"] = column
            elif prev == "pref":
                route["pref"] = column
            prev = column

    return result
889 | ||
787e7624 | 890 | |
9375b5aa | 891 | def ip6_vrf_route(node): |
892 | """ | |
893 | Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'. | |
4563e204 | 894 | It can be used in conjunction with json_cmp() to provide accurate assert explanations. |
9375b5aa | 895 | |
896 | Return example: | |
897 | { | |
898 | '2001:db8:1::/64': { | |
899 | 'dev': 'eth0', | |
900 | 'proto': '188', | |
901 | }, | |
902 | '2001:db8:2::/64': { | |
903 | 'dev': 'eth1', | |
904 | 'proto': 'kernel', | |
905 | } | |
906 | } | |
907 | """ | |
908 | output = normalize_text( | |
701a0192 | 909 | node.run("ip -6 route show vrf {0}-cust1".format(node.name)) |
910 | ).splitlines() | |
9375b5aa | 911 | result = {} |
912 | for line in output: | |
913 | columns = line.split(" ") | |
914 | route = result[columns[0]] = {} | |
915 | prev = None | |
916 | for column in columns: | |
917 | if prev == "dev": | |
918 | route["dev"] = column | |
919 | if prev == "via": | |
920 | route["via"] = column | |
921 | if prev == "proto": | |
922 | # translate protocol names back to numbers | |
923 | route["proto"] = proto_name_to_number(column) | |
924 | if prev == "metric": | |
925 | route["metric"] = column | |
926 | if prev == "pref": | |
927 | route["pref"] = column | |
928 | prev = column | |
929 | ||
930 | return result | |
931 | ||
932 | ||
9b7decf2 JU |
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    lines = normalize_text(node.run("ip rule")).splitlines()
    rules = []
    for line in lines:
        words = line.split(" ")

        # First column is "<pref>:" -- drop the trailing ':'.
        rule = {"pref": words[0][:-1]}
        prev = None
        for word in words:
            if prev in ("from", "to", "proto", "iif", "fwmark"):
                rule[prev] = word
            prev = word

        rules.append(rule)
    return rules
982 | ||
983 | ||
570f25d8 RZ |
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        message = "Sleeping for {} seconds".format(amount)
    else:
        message = reason + " ({} seconds)".format(amount)
    logger.info(message)

    time.sleep(amount)
994 | ||
787e7624 | 995 | |
def checkAddressSanitizerError(output, router, component, logdir=""):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        """Log an ASAN report to stderr and append it to /tmp/AddressSanitzer.txt
        together with the calling test file and test procedure names."""
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back.
            # NOTE: dict.values() is a non-subscriptable view on Python 3, so
            # materialize it before indexing (the old `.values()[0]` crashed).
            testframe = list(sys._current_frames().values())[0]
            level = 0
            callingTest = "unknownTest"
            # Also stop when we walk off the top of the frame stack (f_back
            # is None) instead of dereferencing None.
            while testframe is not None and level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            #
            # Now finding Calling Procedure
            level = 0
            callingProc = "unknownProc"
            while level < 20:
                try:
                    frameProc = sys._getframe(level).f_code.co_name
                except ValueError:
                    # Ran past the top of the stack before finding a test proc.
                    break
                if frameProc not in (
                    "processAddressSanitizerError",
                    "checkAddressSanitizerError",
                    "checkRouterCores",
                    "stopRouter",
                    "stop",
                    "stop_topology",
                    "checkRouterRunning",
                    "check_router_running",
                    "routers_have_failure",
                ):
                    # Found the calling test procedure
                    callingProc = frameProc
                    break
                level = level + 1
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
                addressSanitizerError = re.search(
                    r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
                )
                if addressSanitizerError:
                    processAddressSanitizerError(
                        addressSanitizerError, asanError, router, component
                    )
                    return True
    return False
4942f298 | 1093 | |
787e7624 | 1094 | |
49581587 CH |
1095 | def _sysctl_atleast(commander, variable, min_value): |
1096 | if isinstance(min_value, tuple): | |
1097 | min_value = list(min_value) | |
1098 | is_list = isinstance(min_value, list) | |
594b1259 | 1099 | |
49581587 CH |
1100 | sval = commander.cmd_raises("sysctl -n " + variable).strip() |
1101 | if is_list: | |
1102 | cur_val = [int(x) for x in sval.split()] | |
1103 | else: | |
1104 | cur_val = int(sval) | |
1105 | ||
1106 | set_value = False | |
1107 | if is_list: | |
1108 | for i, v in enumerate(cur_val): | |
1109 | if v < min_value[i]: | |
1110 | set_value = True | |
1111 | else: | |
1112 | min_value[i] = v | |
1113 | else: | |
1114 | if cur_val < min_value: | |
1115 | set_value = True | |
1116 | if set_value: | |
1117 | if is_list: | |
1118 | valstr = " ".join([str(x) for x in min_value]) | |
1119 | else: | |
1120 | valstr = str(min_value) | |
1121 | logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr) | |
a53c08bc | 1122 | commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr)) |
594b1259 | 1123 | |
787e7624 | 1124 | |
49581587 CH |
1125 | def _sysctl_assure(commander, variable, value): |
1126 | if isinstance(value, tuple): | |
1127 | value = list(value) | |
1128 | is_list = isinstance(value, list) | |
797e8dcf | 1129 | |
49581587 CH |
1130 | sval = commander.cmd_raises("sysctl -n " + variable).strip() |
1131 | if is_list: | |
1132 | cur_val = [int(x) for x in sval.split()] | |
1133 | else: | |
1134 | cur_val = sval | |
797e8dcf | 1135 | |
49581587 CH |
1136 | set_value = False |
1137 | if is_list: | |
1138 | for i, v in enumerate(cur_val): | |
1139 | if v != value[i]: | |
1140 | set_value = True | |
1141 | else: | |
1142 | value[i] = v | |
1143 | else: | |
1144 | if cur_val != str(value): | |
1145 | set_value = True | |
1146 | ||
1147 | if set_value: | |
1148 | if is_list: | |
1149 | valstr = " ".join([str(x) for x in value]) | |
1150 | else: | |
1151 | valstr = str(value) | |
1152 | logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr) | |
a53c08bc | 1153 | commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr)) |
49581587 CH |
1154 | |
1155 | ||
def sysctl_atleast(commander, variable, min_value, raises=False):
    """Best-effort: raise sysctl `variable` to at least `min_value`.

    When `commander` is None a throwaway "topotest" Commander is used.
    Failures are logged and swallowed unless `raises` is True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # Include the traceback, for consistency with sysctl_assure().
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
797e8dcf | 1170 | |
787e7624 | 1171 | |
49581587 CH |
def sysctl_assure(commander, variable, value, raises=False):
    """Best-effort: set sysctl `variable` to exactly `value`.

    When `commander` is None a throwaway "topotest" Commander is used.
    Failures are logged (with traceback) and swallowed unless `raises` is
    True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError as error:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1187 | ||
1188 | ||
def rlimit_atleast(rname, min_value, raises=False):
    """Ensure the soft limit for resource `rname` is at least `min_value`.

    The hard limit is raised as well when it is below `min_value`.  Failures
    are logged and swallowed unless `raises` is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except (OSError, ValueError):
        # resource.getrlimit/setrlimit raise ValueError/OSError (e.g. on
        # insufficient privilege) -- the previous handler caught
        # subprocess.CalledProcessError, which they never raise, so failures
        # escaped the best-effort guard even with raises=False.
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1203 | ||
1204 | ||
def fix_netns_limits(ns):
    """Pin the kernel tunables inside namespace `ns` to the values the
    topotests rely on (socket buffers, forwarding, ARP behaviour, ...)."""

    atleast = functools.partial(sysctl_atleast, ns)
    assure = functools.partial(sysctl_assure, ns)

    # Maximum read and write socket buffer sizes
    atleast("net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
    atleast("net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])

    assure("net.ipv4.conf.all.rp_filter", 0)
    assure("net.ipv4.conf.default.rp_filter", 0)
    assure("net.ipv4.conf.lo.rp_filter", 0)

    assure("net.ipv4.conf.all.forwarding", 1)
    assure("net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    assure("net.ipv6.conf.all.forwarding", 1)
    assure("net.ipv6.conf.default.forwarding", 1)

    # ARP
    assure("net.ipv4.conf.default.arp_announce", 2)
    assure("net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    assure("net.ipv4.conf.default.arp_ignore", 0)
    assure("net.ipv4.conf.all.arp_announce", 2)
    assure("net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    assure("net.ipv4.conf.all.arp_ignore", 0)

    assure("net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    assure("net.ipv6.conf.all.keep_addr_on_down", 1)
    if version_cmp(platform.release(), "4.20") >= 0:
        assure("net.ipv6.route.skip_notify_on_dev_down", 1)

    assure("net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    assure("net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    atleast("net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    assure("net.ipv4.fib_multipath_use_neigh", 1)
1247 | ||
1248 | ||
def fix_host_limits():
    """Increase system limits."""

    atleast = functools.partial(sysctl_atleast, None)
    assure = functools.partial(sysctl_assure, None)

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    atleast("fs.file-max", 16 * 1024)
    atleast("kernel.pty.max", 16 * 1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    assure("kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    assure("kernel.core_uses_pid", 1)
    assure("fs.suid_dumpable", 1)

    # Maximum connection backlog
    atleast("net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    atleast("net.core.rmem_max", 16 * 2 ** 20)
    atleast("net.core.wmem_max", 16 * 2 ** 20)

    # Garbage Collection Settings for ARP and Neighbors
    atleast("net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
    atleast("net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
    atleast("net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
    atleast("net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
    # Hold entries for 10 minutes
    assure("net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    assure("net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    # igmp
    assure("net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    atleast("net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    atleast("net.ipv4.route.max_size", 128 * 1024)
    atleast("net.ipv6.route.max_size", 128 * 1024)
49581587 CH |
1289 | |
1290 | ||
def setup_node_tmpdir(logdir, name):
    """Prepare the per-node log directory for node `name` under `logdir`.

    Removes stale valgrind/asan/log artifacts from previous runs, recreates
    the node sub-directory world-writable (mode 1777), and returns the path
    of the node's logfile.
    """
    # Cleanup old log, valgrind, and core files.  NOTE: the asan glob now
    # carries the logdir prefix; it previously read "{1}.*.asan" and so
    # matched relative to the CWD instead of the log directory.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {0}/{1}.*.asan {0}/{1}/".format(logdir, name),
        shell=True,
    )

    # Setup the per node directory.
    nodelogdir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
    )
    logfile = "{0}/{1}.log".format(logdir, name)
    return logfile
797e8dcf | 1304 | |
594b1259 MW |
1305 | |
1306 | class Router(Node): | |
622c4996 | 1307 | "A Node with IPv4/IPv6 forwarding enabled" |
594b1259 | 1308 | |
    def __init__(self, name, **params):
        """Create a router node.

        Loads topogen-style configuration defaults from ../pytest.ini,
        arranges a per-node log directory and logger when the caller did not
        supply one, initializes the base Node, and sets up the FRR daemon
        bookkeeping (which daemons to run and their options).
        """

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        # pytest.ini lives one directory above this library file.
        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up; otherwise
            # create the node tmpdir and a per-node debug logger, and hand it
            # to the base Node via params.
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        # FRR daemon bookkeeping: 1 in self.daemons means "start this daemon".
        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.unified_config = None
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "pim6d": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        # Command prefix for entering this node's namespaces from the host.
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
1380 | ||
edd2bdf6 RZ |
1381 | def _config_frr(self, **params): |
1382 | "Configure FRR binaries" | |
787e7624 | 1383 | self.daemondir = params.get("frrdir") |
edd2bdf6 | 1384 | if self.daemondir is None: |
787e7624 | 1385 | self.daemondir = self.config_defaults.get("topogen", "frrdir") |
edd2bdf6 | 1386 | |
787e7624 | 1387 | zebra_path = os.path.join(self.daemondir, "zebra") |
edd2bdf6 RZ |
1388 | if not os.path.isfile(zebra_path): |
1389 | raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path)) | |
1390 | ||
2ab85530 RZ |
1391 | # pylint: disable=W0221 |
1392 | # Some params are only meaningful for the parent class. | |
594b1259 MW |
1393 | def config(self, **params): |
1394 | super(Router, self).config(**params) | |
1395 | ||
2ab85530 | 1396 | # User did not specify the daemons directory, try to autodetect it. |
787e7624 | 1397 | self.daemondir = params.get("daemondir") |
2ab85530 | 1398 | if self.daemondir is None: |
787e7624 | 1399 | self.routertype = params.get( |
1400 | "routertype", self.config_defaults.get("topogen", "routertype") | |
1401 | ) | |
622c4996 | 1402 | self._config_frr(**params) |
594b1259 | 1403 | else: |
2ab85530 | 1404 | # Test the provided path |
787e7624 | 1405 | zpath = os.path.join(self.daemondir, "zebra") |
2ab85530 | 1406 | if not os.path.isfile(zpath): |
787e7624 | 1407 | raise Exception("No zebra binary found in {}".format(zpath)) |
2ab85530 | 1408 | # Allow user to specify routertype when the path was specified. |
787e7624 | 1409 | if params.get("routertype") is not None: |
1410 | self.routertype = params.get("routertype") | |
2ab85530 | 1411 | |
594b1259 | 1412 | # Set ownership of config files |
787e7624 | 1413 | self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype)) |
2ab85530 | 1414 | |
594b1259 | 1415 | def terminate(self): |
cd79342c | 1416 | # Stop running FRR daemons |
99561211 | 1417 | self.stopRouter() |
594b1259 | 1418 | super(Router, self).terminate() |
49581587 | 1419 | os.system("chmod -R go+rw " + self.logdir) |
b0f0d980 | 1420 | |
    # Return a list of (daemon-name, pid) tuples for the daemons still running
f033a78a DL |
1422 | def listDaemons(self): |
1423 | ret = [] | |
a53c08bc CH |
1424 | rc, stdout, _ = self.cmd_status( |
1425 | "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False | |
1426 | ) | |
49581587 CH |
1427 | if rc: |
1428 | return ret | |
1429 | for d in stdout.strip().split("\n"): | |
1430 | pidfile = d.strip() | |
1431 | try: | |
1432 | pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip()) | |
1433 | name = os.path.basename(pidfile[:-4]) | |
1434 | ||
1435 | # probably not compatible with bsd. | |
1436 | rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False) | |
1437 | if rc: | |
a53c08bc CH |
1438 | logger.warning( |
1439 | "%s: %s exited leaving pidfile %s (%s)", | |
1440 | self.name, | |
1441 | name, | |
1442 | pidfile, | |
1443 | pid, | |
1444 | ) | |
49581587 CH |
1445 | self.cmd("rm -- " + pidfile) |
1446 | else: | |
1447 | ret.append((name, pid)) | |
1448 | except (subprocess.CalledProcessError, ValueError): | |
1449 | pass | |
f033a78a | 1450 | return ret |
cf865d1b | 1451 | |
    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        Sends SIGTERM first, polls (up to ~15s) for the daemons to exit,
        then escalates to SIGBUS -- forcing a core -- for any stragglers.
        Returns the error report from checkRouterCores() ("" when clean).
        Errors are ignored for FRR versions older than `minErrorVersion`;
        otherwise, when `assertOnError` is set, errors raise an assertion.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                # Daemon may already be gone; log and keep going.
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        # Poll for up to 30 * 0.5s for everything to exit gracefully.
        running = self.listDaemons()
        if running:
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        # Escalate: SIGBUS makes the stuck daemon dump core for analysis.
        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            assert "Errors found - details follow:" == 0, errors
        return errors
f76774ec | 1504 | |
594b1259 MW |
1505 | def removeIPs(self): |
1506 | for interface in self.intfNames(): | |
49581587 CH |
1507 | try: |
1508 | self.intf_ip_cmd(interface, "ip address flush " + interface) | |
1509 | except Exception as ex: | |
1510 | logger.error("%s can't remove IPs %s", self, str(ex)) | |
1511 | # pdb.set_trace() | |
1512 | # assert False, "can't remove IPs %s" % str(ex) | |
8dd5077d PG |
1513 | |
1514 | def checkCapability(self, daemon, param): | |
1515 | if param is not None: | |
1516 | daemon_path = os.path.join(self.daemondir, daemon) | |
787e7624 | 1517 | daemon_search_option = param.replace("-", "") |
1518 | output = self.cmd( | |
1519 | "{0} -h | grep {1}".format(daemon_path, daemon_search_option) | |
1520 | ) | |
8dd5077d PG |
1521 | if daemon_search_option not in output: |
1522 | return False | |
1523 | return True | |
1524 | ||
    def loadConf(self, daemon, source=None, param=None):
        """Enabled and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                # Bare filename that doesn't exist as-is: retry relative to
                # the test script's directory, under a per-router sub-dir.
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.info(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            if daemon == "frr":
                # Pseudo-daemon "frr" switches the router to unified-config
                # mode rather than enabling an actual daemon.
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                # No usable source: start from an empty config file.  In
                # unified-config mode per-daemon files are left alone.
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons
e1dfa45e | 1585 | |
3f950192 | 1586 | def runInWindow(self, cmd, title=None): |
49581587 | 1587 | return self.run_in_window(cmd, title) |
3f950192 | 1588 | |
9711fc7e | 1589 | def startRouter(self, tgen=None): |
a4b4bb50 JAG |
1590 | if self.unified_config: |
1591 | self.cmd( | |
1592 | 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf' | |
1593 | % self.routertype | |
1594 | ) | |
1595 | else: | |
1596 | # Disable integrated-vtysh-config | |
1597 | self.cmd( | |
1598 | 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' | |
1599 | % self.routertype | |
1600 | ) | |
1601 | ||
787e7624 | 1602 | self.cmd( |
1603 | "chown %s:%svty /etc/%s/vtysh.conf" | |
1604 | % (self.routertype, self.routertype, self.routertype) | |
1605 | ) | |
13e1fc49 | 1606 | # TODO remove the following lines after all tests are migrated to Topogen. |
594b1259 | 1607 | # Try to find relevant old logfiles in /tmp and delete them |
787e7624 | 1608 | map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))) |
594b1259 | 1609 | # Remove old core files |
787e7624 | 1610 | map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))) |
594b1259 MW |
1611 | # Remove IP addresses from OS first - we have them in zebra.conf |
1612 | self.removeIPs() | |
1613 | # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher | |
1614 | # No error - but return message and skip all the tests | |
787e7624 | 1615 | if self.daemons["ldpd"] == 1: |
1616 | ldpd_path = os.path.join(self.daemondir, "ldpd") | |
2ab85530 | 1617 | if not os.path.isfile(ldpd_path): |
222ea88b | 1618 | logger.info("LDP Test, but no ldpd compiled or installed") |
594b1259 | 1619 | return "LDP Test, but no ldpd compiled or installed" |
dd4eca4d | 1620 | |
787e7624 | 1621 | if version_cmp(platform.release(), "4.5") < 0: |
222ea88b | 1622 | logger.info("LDP Test need Linux Kernel 4.5 minimum") |
45619ee3 | 1623 | return "LDP Test need Linux Kernel 4.5 minimum" |
9711fc7e LB |
1624 | # Check if have mpls |
1625 | if tgen != None: | |
1626 | self.hasmpls = tgen.hasmpls | |
1627 | if self.hasmpls != True: | |
787e7624 | 1628 | logger.info( |
1629 | "LDP/MPLS Tests will be skipped, platform missing module(s)" | |
1630 | ) | |
9711fc7e LB |
1631 | else: |
1632 | # Test for MPLS Kernel modules available | |
1633 | self.hasmpls = False | |
787e7624 | 1634 | if not module_present("mpls-router"): |
1635 | logger.info( | |
1636 | "MPLS tests will not run (missing mpls-router kernel module)" | |
1637 | ) | |
1638 | elif not module_present("mpls-iptunnel"): | |
1639 | logger.info( | |
1640 | "MPLS tests will not run (missing mpls-iptunnel kernel module)" | |
1641 | ) | |
9711fc7e LB |
1642 | else: |
1643 | self.hasmpls = True | |
1644 | if self.hasmpls != True: | |
1645 | return "LDP/MPLS Tests need mpls kernel modules" | |
49581587 CH |
1646 | |
1647 | # Really want to use sysctl_atleast here, but only when MPLS is actually being | |
1648 | # used | |
787e7624 | 1649 | self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels") |
44a592b2 | 1650 | |
3f950192 CH |
1651 | shell_routers = g_extra_config["shell"] |
1652 | if "all" in shell_routers or self.name in shell_routers: | |
0bc76852 | 1653 | self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name) |
3f950192 | 1654 | |
787e7624 | 1655 | if self.daemons["eigrpd"] == 1: |
1656 | eigrpd_path = os.path.join(self.daemondir, "eigrpd") | |
44a592b2 | 1657 | if not os.path.isfile(eigrpd_path): |
222ea88b | 1658 | logger.info("EIGRP Test, but no eigrpd compiled or installed") |
44a592b2 MW |
1659 | return "EIGRP Test, but no eigrpd compiled or installed" |
1660 | ||
787e7624 | 1661 | if self.daemons["bfdd"] == 1: |
1662 | bfdd_path = os.path.join(self.daemondir, "bfdd") | |
4d45d6d3 RZ |
1663 | if not os.path.isfile(bfdd_path): |
1664 | logger.info("BFD Test, but no bfdd compiled or installed") | |
1665 | return "BFD Test, but no bfdd compiled or installed" | |
1666 | ||
1726edc3 CH |
1667 | status = self.startRouterDaemons(tgen=tgen) |
1668 | ||
1669 | vtysh_routers = g_extra_config["vtysh"] | |
1670 | if "all" in vtysh_routers or self.name in vtysh_routers: | |
0bc76852 | 1671 | self.run_in_window("vtysh", title="vt-%s" % self.name) |
1726edc3 | 1672 | |
a4b4bb50 JAG |
1673 | if self.unified_config: |
1674 | self.cmd("vtysh -f /etc/frr/frr.conf") | |
1675 | ||
1726edc3 | 1676 | return status |
aa5261bf | 1677 | |
aa5261bf RZ |
1678 | def getStdErr(self, daemon): |
1679 | return self.getLog("err", daemon) | |
1680 | ||
1681 | def getStdOut(self, daemon): | |
1682 | return self.getLog("out", daemon) | |
1683 | ||
1684 | def getLog(self, log, daemon): | |
1685 | return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log)) | |
1686 | ||
    def startRouterDaemons(self, daemons=None, tgen=None):
        """Start FRR daemons for this router.

        daemons: optional list of daemon names to start; when None, all
            daemons enabled in self.daemons are started and the IPv6
            link-local fixup at the end is also performed.
        tgen: topology generator handle; not referenced in this method body.

        Returns "" on success, or an error string when no pid files are
        found after launching.
        """

        # Per-run launch knobs from the global test configuration.
        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        # Derive the running FRR version from the bgpd banner if unknown.
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        def start_daemon(daemon, extra_opts=None):
            # Build and execute the shell command launching one daemon,
            # optionally under valgrind/strace or inside a gdb window.
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                # snmpd is a system binary, not an FRR daemon.
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
            else:
                binary = os.path.join(self.daemondir, daemon)

                # ASAN log destination; abort_on_error replaces the prefix
                # when requested so ASAN aborts instead of just logging.
                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    cmdenv = "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    # strace replaces (not augments) the env prefix.
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
                    daemon_opts, daemon
                )
            if extra_opts:
                cmdopt += " " + extra_opts

            # Launch in an interactive gdb window when this router/daemon
            # was selected via the gdb_* options; otherwise daemonize.
            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            else:
                if daemon != "snmpd":
                    # -d: detach/daemonize (FRR daemons only).
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start Zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zerbra a chance to configure interface addresses that snmpd daemon
            # may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        if daemons is None:
            # Fix Link-Local Addresses on initial startup
            # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
            _, output, _ = self.cmd_status(
                "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
                stderr=subprocess.STDOUT,
            )
            logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        return ""
1873 | ||
c39fe454 KK |
1874 | def killRouterDaemons( |
1875 | self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1" | |
1876 | ): | |
622c4996 | 1877 | # Kill Running FRR |
c65a7e26 | 1878 | # Daemons(user specified daemon only) using SIGKILL |
c39fe454 | 1879 | rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype) |
c65a7e26 KK |
1880 | errors = "" |
1881 | daemonsNotRunning = [] | |
1882 | if re.search(r"No such file or directory", rundaemons): | |
1883 | return errors | |
1884 | for daemon in daemons: | |
1885 | if rundaemons is not None and daemon in rundaemons: | |
1886 | numRunning = 0 | |
701a0192 | 1887 | dmns = rundaemons.split("\n") |
cd79342c MS |
1888 | # Exclude empty string at end of list |
1889 | for d in dmns[:-1]: | |
c65a7e26 | 1890 | if re.search(r"%s" % daemon, d): |
c39fe454 KK |
1891 | daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() |
1892 | if daemonpid.isdigit() and pid_exists(int(daemonpid)): | |
1893 | logger.info( | |
1894 | "{}: killing {}".format( | |
1895 | self.name, | |
1896 | os.path.basename(d.rstrip().rsplit(".", 1)[0]), | |
1897 | ) | |
1898 | ) | |
1899 | self.cmd("kill -9 %s" % daemonpid) | |
c65a7e26 KK |
1900 | if pid_exists(int(daemonpid)): |
1901 | numRunning += 1 | |
c9f92703 | 1902 | while wait and numRunning > 0: |
c39fe454 KK |
1903 | sleep( |
1904 | 2, | |
1905 | "{}: waiting for {} daemon to be stopped".format( | |
1906 | self.name, daemon | |
1907 | ), | |
1908 | ) | |
cd79342c | 1909 | |
c65a7e26 | 1910 | # 2nd round of kill if daemons didn't exit |
cd79342c | 1911 | for d in dmns[:-1]: |
c65a7e26 | 1912 | if re.search(r"%s" % daemon, d): |
c39fe454 KK |
1913 | daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() |
1914 | if daemonpid.isdigit() and pid_exists( | |
1915 | int(daemonpid) | |
1916 | ): | |
1917 | logger.info( | |
1918 | "{}: killing {}".format( | |
1919 | self.name, | |
1920 | os.path.basename( | |
1921 | d.rstrip().rsplit(".", 1)[0] | |
1922 | ), | |
1923 | ) | |
1924 | ) | |
1925 | self.cmd("kill -9 %s" % daemonpid) | |
c9f92703 DS |
1926 | if daemonpid.isdigit() and not pid_exists( |
1927 | int(daemonpid) | |
1928 | ): | |
1929 | numRunning -= 1 | |
1930 | self.cmd("rm -- {}".format(d.rstrip())) | |
c65a7e26 KK |
1931 | if wait: |
1932 | errors = self.checkRouterCores(reportOnce=True) | |
c39fe454 KK |
1933 | if self.checkRouterVersion("<", minErrorVersion): |
1934 | # ignore errors in old versions | |
c65a7e26 KK |
1935 | errors = "" |
1936 | if assertOnError and len(errors) > 0: | |
1937 | assert "Errors found - details follow:" == 0, errors | |
c65a7e26 KK |
1938 | else: |
1939 | daemonsNotRunning.append(daemon) | |
1940 | if len(daemonsNotRunning) > 0: | |
c39fe454 | 1941 | errors = errors + "Daemons are not running", daemonsNotRunning |
c65a7e26 KK |
1942 | |
1943 | return errors | |
1944 | ||
2a59a86b LB |
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Scan all enabled daemons for core files, memory leaks and
        AddressSanitizer errors; return the accumulated report text.

        reportLeaks: also scan daemon stderr for "memstats" leak reports.
        reportOnce: only report once per run; note the early return below
            yields None (not "") when a report was already made.
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    # Extract a backtrace from the core and append it.
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        # Reformat the memstats dump for readability.
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n  ## \1",
                            log,
                        )
                        log = re.sub("memstats:  ", "    ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            # Suppress further reports for reportOnce callers.
            self.reportCores = False
        return traces
f76774ec | 1998 | |
    def checkRouterRunning(self):
        """Check if router daemons are running; collect crashinfo for any
        daemon that is not running and return an error string ("" if all
        expected daemons are up). Returns on the FIRST missing daemon."""

        global fatal_error

        # Each running daemon answers "show logging" via vtysh; a daemon
        # missing from this output is considered not running.
        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            # snmpd is not an FRR daemon and never shows in vtysh output.
            if daemon == "snmpd":
                continue
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""
fb80b81b LB |
2060 | |
2061 | def checkRouterVersion(self, cmpop, version): | |
2062 | """ | |
2063 | Compares router version using operation `cmpop` with `version`. | |
2064 | Valid `cmpop` values: | |
2065 | * `>=`: has the same version or greater | |
2066 | * '>': has greater version | |
2067 | * '=': has the same version | |
2068 | * '<': has a lesser version | |
2069 | * '<=': has the same version or lesser | |
2070 | ||
2071 | Usage example: router.checkRouterVersion('>', '1.0') | |
2072 | """ | |
6bfe4b8b MW |
2073 | |
2074 | # Make sure we have version information first | |
2075 | if self.version == None: | |
787e7624 | 2076 | self.version = self.cmd( |
2077 | os.path.join(self.daemondir, "bgpd") + " -v" | |
2078 | ).split()[2] | |
2079 | logger.info("{}: running version: {}".format(self.name, self.version)) | |
6bfe4b8b | 2080 | |
fb80b81b | 2081 | rversion = self.version |
11761ab0 | 2082 | if rversion == None: |
fb80b81b LB |
2083 | return False |
2084 | ||
2085 | result = version_cmp(rversion, version) | |
787e7624 | 2086 | if cmpop == ">=": |
fb80b81b | 2087 | return result >= 0 |
787e7624 | 2088 | if cmpop == ">": |
fb80b81b | 2089 | return result > 0 |
787e7624 | 2090 | if cmpop == "=": |
fb80b81b | 2091 | return result == 0 |
787e7624 | 2092 | if cmpop == "<": |
fb80b81b | 2093 | return result < 0 |
787e7624 | 2094 | if cmpop == "<": |
fb80b81b | 2095 | return result < 0 |
787e7624 | 2096 | if cmpop == "<=": |
fb80b81b LB |
2097 | return result <= 0 |
2098 | ||
594b1259 MW |
2099 | def get_ipv6_linklocal(self): |
2100 | "Get LinkLocal Addresses from interfaces" | |
2101 | ||
2102 | linklocal = [] | |
2103 | ||
787e7624 | 2104 | ifaces = self.cmd("ip -6 address") |
594b1259 | 2105 | # Fix newlines (make them all the same) |
787e7624 | 2106 | ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines() |
2107 | interface = "" | |
2108 | ll_per_if_count = 0 | |
594b1259 | 2109 | for line in ifaces: |
fd03dacd | 2110 | m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line) |
594b1259 MW |
2111 | if m: |
2112 | interface = m.group(1) | |
2113 | ll_per_if_count = 0 | |
787e7624 | 2114 | m = re.search( |
2115 | "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link", | |
2116 | line, | |
2117 | ) | |
594b1259 MW |
2118 | if m: |
2119 | local = m.group(1) | |
2120 | ll_per_if_count += 1 | |
787e7624 | 2121 | if ll_per_if_count > 1: |
594b1259 MW |
2122 | linklocal += [["%s-%s" % (interface, ll_per_if_count), local]] |
2123 | else: | |
2124 | linklocal += [[interface, local]] | |
2125 | return linklocal | |
787e7624 | 2126 | |
80eeefb7 MW |
2127 | def daemon_available(self, daemon): |
2128 | "Check if specified daemon is installed (and for ldp if kernel supports MPLS)" | |
2129 | ||
2ab85530 RZ |
2130 | daemon_path = os.path.join(self.daemondir, daemon) |
2131 | if not os.path.isfile(daemon_path): | |
80eeefb7 | 2132 | return False |
787e7624 | 2133 | if daemon == "ldpd": |
2134 | if version_cmp(platform.release(), "4.5") < 0: | |
b431b554 | 2135 | return False |
787e7624 | 2136 | if not module_present("mpls-router", load=False): |
80eeefb7 | 2137 | return False |
787e7624 | 2138 | if not module_present("mpls-iptunnel", load=False): |
b431b554 | 2139 | return False |
80eeefb7 | 2140 | return True |
f2d6ce41 | 2141 | |
80eeefb7 | 2142 | def get_routertype(self): |
622c4996 | 2143 | "Return the type of Router (frr)" |
80eeefb7 MW |
2144 | |
2145 | return self.routertype | |
787e7624 | 2146 | |
50c40bde MW |
    def report_memory_leaks(self, filename_prefix, testscript):
        """Report memory leaks to a file named
        "<filename_prefix><testscript-without-.py>.txt".

        Scans each enabled daemon's stderr for "memstats" output and
        appends a markdown-formatted report. The report file is opened
        lazily on the first leak found and closed at the end.
        """

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        # Opened once here and reused for later daemons.
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                    leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    # Reformat the memstats dump into markdown sections.
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats:  ", "    ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
80eeefb7 | 2184 | |
787e7624 | 2185 | |
def frr_unicode(s):
    """Convert string to unicode, depending on python version"""
    # Python 3 strings are already unicode; only Python 2 needs conversion.
    if sys.version_info[0] <= 2:
        return unicode(s)  # pylint: disable=E0602
    return s
c8e5983d CH |
2192 | |
2193 | ||
def is_mapping(o):
    """Return True when *o* is a mapping (dict-like) object."""
    return isinstance(o, Mapping)