]>
Commit | Line | Data |
---|---|---|
594b1259 | 1 | #!/usr/bin/env python |
acddc0ed | 2 | # SPDX-License-Identifier: ISC |
594b1259 MW |
3 | |
4 | # | |
5 | # topotest.py | |
6 | # Library of helper functions for NetDEF Topology Tests | |
7 | # | |
8 | # Copyright (c) 2016 by | |
9 | # Network Device Education Foundation, Inc. ("NetDEF") | |
10 | # | |
594b1259 | 11 | |
49581587 | 12 | import difflib |
50c40bde | 13 | import errno |
fd858290 | 14 | import functools |
594b1259 | 15 | import glob |
49581587 CH |
16 | import json |
17 | import os | |
18 | import pdb | |
19 | import platform | |
20 | import re | |
21 | import resource | |
22 | import signal | |
594b1259 | 23 | import subprocess |
49581587 | 24 | import sys |
1fca63c1 | 25 | import tempfile |
570f25d8 | 26 | import time |
49581587 | 27 | from copy import deepcopy |
594b1259 | 28 | |
49581587 | 29 | import lib.topolog as topolog |
6c131bd3 RZ |
30 | from lib.topolog import logger |
31 | ||
04ce2b97 RZ |
32 | if sys.version_info[0] > 2: |
33 | import configparser | |
c8e5983d | 34 | from collections.abc import Mapping |
04ce2b97 RZ |
35 | else: |
36 | import ConfigParser as configparser | |
c8e5983d | 37 | from collections import Mapping |
04ce2b97 | 38 | |
49581587 CH |
39 | from lib import micronet |
40 | from lib.micronet_compat import Node | |
594b1259 | 41 | |
# Module-wide extra configuration dictionary; starts empty (presumably filled
# in by the test harness/CLI at startup -- verify against callers).
g_extra_config = {}
701a0192 | 43 | |
a53c08bc | 44 | |
49581587 CH |
45 | def get_logs_path(rundir): |
46 | logspath = topolog.get_test_logdir() | |
47 | return os.path.join(rundir, logspath) | |
48 | ||
0b25370e | 49 | |
def gdb_core(obj, daemon, corefiles):
    """
    Extract a backtrace from the first core file of `daemon` with gdb.

    Writes a crash notice and the backtrace to stderr and returns the raw
    gdb batch output.
    """
    # Batch command list: threads + full backtrace, then walk five frames
    # up, disassembling at each stop.
    commands = ["info threads", "bt full", "disassemble"]
    for _ in range(5):
        commands += ["up", "disassemble"]

    gdb_args = []
    for command in commands:
        gdb_args += ["-ex", command]

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdb_args
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
787e7624 | 78 | |
701a0192 | 79 | |
3668ed8d RZ |
80 | class json_cmp_result(object): |
81 | "json_cmp result class for better assertion messages" | |
82 | ||
83 | def __init__(self): | |
84 | self.errors = [] | |
85 | ||
86 | def add_error(self, error): | |
87 | "Append error message to the result" | |
2db5888d RZ |
88 | for line in error.splitlines(): |
89 | self.errors.append(line) | |
3668ed8d RZ |
90 | |
91 | def has_errors(self): | |
92 | "Returns True if there were errors, otherwise False." | |
93 | return len(self.errors) > 0 | |
94 | ||
849224d4 G |
95 | def gen_report(self): |
96 | headline = ["Generated JSON diff error report:", ""] | |
97 | return headline + self.errors | |
98 | ||
7fe06d55 | 99 | def __str__(self): |
849224d4 G |
100 | return ( |
101 | "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n" | |
102 | ) | |
7fe06d55 | 103 | |
da63d5b3 | 104 | |
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an
    error report suited to be read by a human eye.

    Returns the accumulator tuple ``(error_count, error_text)``.

    NOTE: may mutate `d1`/`d2` while matching Array elements, so callers
    should hand in deep copies (json_cmp() does).
    """

    def dump_json(v):
        # Containers are pretty-printed and tab-prefixed for the report;
        # scalars are simply quoted.
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        if isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        # bool must be tested before (int, float): bool is an int subclass,
        # so the Number branch would otherwise shadow it.
        elif isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    # '*' in d2 matches anything; equal scalars match trivially.
    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    # Ordered Array comparison: explicit '__ordered__' marker or exact mode.
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    # Unordered Array comparison: every d2 element must match some d1 element.
    elif isinstance(d1, list) and isinstance(d2, list):
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    # Compare copies since the recursion may mutate its args.
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        # Consume the matched element so it can't match twice.
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    # Exact Object comparison: key sets must be identical.
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    # Subset Object comparison: None values in d2 assert key absence in d1.
    elif isinstance(d1, dict) and isinstance(d2, dict):
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    # Type mismatch (e.g. Array vs Object) weighs heavier than a value diff.
    else:
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
a82e5f9a | 269 | |
849224d4 G |
270 | |
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function for two parsed JSON data structures.

    Returns None when all JSON Object keys and all Array elements of `d2` have
    a match in `d1` -- i.e. `d2` is a "subset" of `d1` without honoring any
    order. Otherwise an error report is generated and wrapped in a
    json_cmp_result().

    Special notations for unusual cases:
      * exact=True -- require full equality, including order inside Arrays
      * 'null' (None) as a JSON Object value -- assert the key is absent in d1
      * '*' as an Object or Array value -- assert presence, ignore the value
      * '__ordered__' as an Array's first element in d2 -- also check order
        against the corresponding Array in d1
    """
    # Deep-copy both sides: the report generator mutates its arguments.
    diff_count, diff_text = gen_json_diff_report(
        deepcopy(d1), deepcopy(d2), exact=exact
    )

    if diff_count == 0:
        return None
    result = json_cmp_result()
    result.add_error(diff_text)
    return result
09e21b44 | 301 | |
a82e5f9a | 302 | |
5cffda18 RZ |
303 | def router_output_cmp(router, cmd, expected): |
304 | """ | |
305 | Runs `cmd` in router and compares the output with `expected`. | |
306 | """ | |
787e7624 | 307 | return difflines( |
308 | normalize_text(router.vtysh_cmd(cmd)), | |
309 | normalize_text(expected), | |
310 | title1="Current output", | |
311 | title2="Expected output", | |
312 | ) | |
5cffda18 RZ |
313 | |
314 | ||
def router_json_cmp(router, cmd, data, exact=False):
    """
    Run `cmd` on `router` (normally a command ending in 'json'), parse the
    JSON result and compare it against `data` with json_cmp().
    """
    parsed = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(parsed, data, exact)
5cffda18 RZ |
321 | |
322 | ||
1fca63c1 RZ |
323 | def run_and_expect(func, what, count=20, wait=3): |
324 | """ | |
325 | Run `func` and compare the result with `what`. Do it for `count` times | |
326 | waiting `wait` seconds between tries. By default it tries 20 times with | |
327 | 3 seconds delay between tries. | |
328 | ||
329 | Returns (True, func-return) on success or | |
330 | (False, func-return) on failure. | |
5cffda18 RZ |
331 | |
332 | --- | |
333 | ||
334 | Helper functions to use with this function: | |
335 | - router_output_cmp | |
336 | - router_json_cmp | |
1fca63c1 | 337 | """ |
fd858290 RZ |
338 | start_time = time.time() |
339 | func_name = "<unknown>" | |
340 | if func.__class__ == functools.partial: | |
341 | func_name = func.func.__name__ | |
342 | else: | |
343 | func_name = func.__name__ | |
344 | ||
a5722d5a DA |
345 | # Just a safety-check to avoid running topotests with very |
346 | # small wait/count arguments. | |
347 | wait_time = wait * count | |
348 | if wait_time < 5: | |
349 | assert ( | |
350 | wait_time >= 5 | |
351 | ), "Waiting time is too small (count={}, wait={}), adjust timer values".format( | |
352 | count, wait | |
353 | ) | |
354 | ||
fd858290 | 355 | logger.info( |
8d3dab20 IR |
356 | "'{}' polling started (interval {} secs, maximum {} tries)".format( |
357 | func_name, wait, count | |
787e7624 | 358 | ) |
359 | ) | |
fd858290 | 360 | |
1fca63c1 RZ |
361 | while count > 0: |
362 | result = func() | |
363 | if result != what: | |
570f25d8 | 364 | time.sleep(wait) |
1fca63c1 RZ |
365 | count -= 1 |
366 | continue | |
fd858290 RZ |
367 | |
368 | end_time = time.time() | |
787e7624 | 369 | logger.info( |
370 | "'{}' succeeded after {:.2f} seconds".format( | |
371 | func_name, end_time - start_time | |
372 | ) | |
373 | ) | |
1fca63c1 | 374 | return (True, result) |
fd858290 RZ |
375 | |
376 | end_time = time.time() | |
787e7624 | 377 | logger.error( |
378 | "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time) | |
379 | ) | |
1fca63c1 RZ |
380 | return (False, result) |
381 | ||
382 | ||
a6fd124a RZ |
383 | def run_and_expect_type(func, etype, count=20, wait=3, avalue=None): |
384 | """ | |
385 | Run `func` and compare the result with `etype`. Do it for `count` times | |
386 | waiting `wait` seconds between tries. By default it tries 20 times with | |
387 | 3 seconds delay between tries. | |
388 | ||
389 | This function is used when you want to test the return type and, | |
390 | optionally, the return value. | |
391 | ||
392 | Returns (True, func-return) on success or | |
393 | (False, func-return) on failure. | |
394 | """ | |
395 | start_time = time.time() | |
396 | func_name = "<unknown>" | |
397 | if func.__class__ == functools.partial: | |
398 | func_name = func.func.__name__ | |
399 | else: | |
400 | func_name = func.__name__ | |
401 | ||
a5722d5a DA |
402 | # Just a safety-check to avoid running topotests with very |
403 | # small wait/count arguments. | |
404 | wait_time = wait * count | |
405 | if wait_time < 5: | |
406 | assert ( | |
407 | wait_time >= 5 | |
408 | ), "Waiting time is too small (count={}, wait={}), adjust timer values".format( | |
409 | count, wait | |
410 | ) | |
411 | ||
a6fd124a RZ |
412 | logger.info( |
413 | "'{}' polling started (interval {} secs, maximum wait {} secs)".format( | |
787e7624 | 414 | func_name, wait, int(wait * count) |
415 | ) | |
416 | ) | |
a6fd124a RZ |
417 | |
418 | while count > 0: | |
419 | result = func() | |
420 | if not isinstance(result, etype): | |
787e7624 | 421 | logger.debug( |
422 | "Expected result type '{}' got '{}' instead".format(etype, type(result)) | |
423 | ) | |
a6fd124a RZ |
424 | time.sleep(wait) |
425 | count -= 1 | |
426 | continue | |
427 | ||
428 | if etype != type(None) and avalue != None and result != avalue: | |
429 | logger.debug("Expected value '{}' got '{}' instead".format(avalue, result)) | |
430 | time.sleep(wait) | |
431 | count -= 1 | |
432 | continue | |
433 | ||
434 | end_time = time.time() | |
787e7624 | 435 | logger.info( |
436 | "'{}' succeeded after {:.2f} seconds".format( | |
437 | func_name, end_time - start_time | |
438 | ) | |
439 | ) | |
a6fd124a RZ |
440 | return (True, result) |
441 | ||
442 | end_time = time.time() | |
787e7624 | 443 | logger.error( |
444 | "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time) | |
445 | ) | |
a6fd124a RZ |
446 | return (False, result) |
447 | ||
448 | ||
1375385a CH |
449 | def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0): |
450 | """ | |
451 | Runs `cmd` that returns JSON data (normally the command ends with 'json') | |
452 | and compare with `data` contents. Retry by default for 10 seconds | |
453 | """ | |
454 | ||
455 | def test_func(): | |
456 | return router_json_cmp(router, cmd, data, exact) | |
457 | ||
458 | ok, _ = run_and_expect(test_func, None, int(retry_timeout), 1) | |
459 | return ok | |
460 | ||
461 | ||
594b1259 MW |
462 | def int2dpid(dpid): |
463 | "Converting Integer to DPID" | |
464 | ||
465 | try: | |
466 | dpid = hex(dpid)[2:] | |
787e7624 | 467 | dpid = "0" * (16 - len(dpid)) + dpid |
594b1259 MW |
468 | return dpid |
469 | except IndexError: | |
787e7624 | 470 | raise Exception( |
471 | "Unable to derive default datapath ID - " | |
472 | "please either specify a dpid or use a " | |
473 | "canonical switch name such as s23." | |
474 | ) | |
475 | ||
594b1259 | 476 | |
50c40bde MW |
477 | def pid_exists(pid): |
478 | "Check whether pid exists in the current process table." | |
479 | ||
480 | if pid <= 0: | |
481 | return False | |
f033a78a DL |
482 | try: |
483 | os.waitpid(pid, os.WNOHANG) | |
484 | except: | |
485 | pass | |
50c40bde MW |
486 | try: |
487 | os.kill(pid, 0) | |
488 | except OSError as err: | |
489 | if err.errno == errno.ESRCH: | |
490 | # ESRCH == No such process | |
491 | return False | |
492 | elif err.errno == errno.EPERM: | |
493 | # EPERM clearly means there's a process to deny access to | |
494 | return True | |
495 | else: | |
496 | # According to "man 2 kill" possible error values are | |
497 | # (EINVAL, EPERM, ESRCH) | |
498 | raise | |
499 | else: | |
500 | return True | |
501 | ||
787e7624 | 502 | |
def get_textdiff(text1, text2, title1="", title2="", **opts):
    """
    Return a unified diff of `text1` against `text2` (lists of lines), or an
    empty string when they match. Empty lines are dropped from the diff.
    """
    delta = "\n".join(
        difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    )
    # Clean up line endings and blank lines.
    return os.linesep.join(line for line in delta.splitlines() if line)
512 | ||
787e7624 | 513 | |
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def as_lines(text):
        # Normalize trailing whitespace, keep line endings for difflib.
        return ("\n".join(text.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(as_lines(text1), as_lines(text2), title1, title2, **opts)
1fca63c1 | 519 | |
787e7624 | 520 | |
1fca63c1 RZ |
521 | def get_file(content): |
522 | """ | |
523 | Generates a temporary file in '/tmp' with `content` and returns the file name. | |
524 | """ | |
49581587 CH |
525 | if isinstance(content, list) or isinstance(content, tuple): |
526 | content = "\n".join(content) | |
787e7624 | 527 | fde = tempfile.NamedTemporaryFile(mode="w", delete=False) |
1fca63c1 RZ |
528 | fname = fde.name |
529 | fde.write(content) | |
530 | fde.close() | |
531 | return fname | |
532 | ||
787e7624 | 533 | |
f7840f6b RZ |
534 | def normalize_text(text): |
535 | """ | |
9683a1bb | 536 | Strips formating spaces/tabs, carriage returns and trailing whitespace. |
f7840f6b | 537 | """ |
787e7624 | 538 | text = re.sub(r"[ \t]+", " ", text) |
539 | text = re.sub(r"\r", "", text) | |
9683a1bb RZ |
540 | |
541 | # Remove whitespace in the middle of text. | |
787e7624 | 542 | text = re.sub(r"[ \t]+\n", "\n", text) |
9683a1bb RZ |
543 | # Remove whitespace at the end of the text. |
544 | text = text.rstrip() | |
545 | ||
f7840f6b RZ |
546 | return text |
547 | ||
787e7624 | 548 | |
0414a764 DS |
549 | def is_linux(): |
550 | """ | |
551 | Parses unix name output to check if running on GNU/Linux. | |
552 | ||
553 | Returns True if running on Linux, returns False otherwise. | |
554 | """ | |
555 | ||
556 | if os.uname()[0] == "Linux": | |
557 | return True | |
558 | return False | |
559 | ||
560 | ||
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    # Only meaningful on Linux, where iproute2 exists.
    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
            )
            # First word of the first stderr line.
            # NOTE(review): no text mode is requested, so communicate()
            # returns bytes on Python 3; bytes never compare equal to the
            # str "Error:" below, making the test succeed whenever stderr is
            # non-empty -- confirm whether a .decode() was intended here.
            iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]

            if iproute2_err != "Error:":
                return True
        except Exception:
            # Covers a missing 'ip' binary as well as an empty stderr
            # (IndexError from the [0] above).
            pass
    return False
584 | ||
51c33a57 SW |
585 | def iproute2_is_fdb_get_capable(): |
586 | """ | |
587 | Checks if the iproute2 version installed on the system is capable of | |
588 | handling `bridge fdb get` commands to query neigh table resolution. | |
589 | ||
590 | Returns True if capability can be detected, returns False otherwise. | |
591 | """ | |
592 | ||
593 | if is_linux(): | |
594 | try: | |
595 | subp = subprocess.Popen( | |
596 | ["bridge", "fdb", "get", "help"], | |
597 | stdout=subprocess.PIPE, | |
598 | stderr=subprocess.PIPE, | |
599 | stdin=subprocess.PIPE, | |
600 | ) | |
601 | iproute2_out = subp.communicate()[1].splitlines()[0].split()[0] | |
602 | ||
603 | if "Usage" in str(iproute2_out): | |
604 | return True | |
605 | except Exception: | |
606 | pass | |
607 | return False | |
0414a764 | 608 | |
cc95fbd9 | 609 | def module_present_linux(module, load): |
f2d6ce41 CF |
610 | """ |
611 | Returns whether `module` is present. | |
612 | ||
613 | If `load` is true, it will try to load it via modprobe. | |
614 | """ | |
787e7624 | 615 | with open("/proc/modules", "r") as modules_file: |
616 | if module.replace("-", "_") in modules_file.read(): | |
f2d6ce41 | 617 | return True |
787e7624 | 618 | cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module) |
f2d6ce41 CF |
619 | if os.system(cmd) != 0: |
620 | return False | |
621 | else: | |
622 | return True | |
623 | ||
787e7624 | 624 | |
cc95fbd9 DS |
625 | def module_present_freebsd(module, load): |
626 | return True | |
627 | ||
787e7624 | 628 | |
cc95fbd9 DS |
629 | def module_present(module, load=True): |
630 | if sys.platform.startswith("linux"): | |
28440fd9 | 631 | return module_present_linux(module, load) |
cc95fbd9 | 632 | elif sys.platform.startswith("freebsd"): |
28440fd9 | 633 | return module_present_freebsd(module, load) |
cc95fbd9 | 634 | |
787e7624 | 635 | |
4190fe1e RZ |
636 | def version_cmp(v1, v2): |
637 | """ | |
638 | Compare two version strings and returns: | |
639 | ||
640 | * `-1`: if `v1` is less than `v2` | |
641 | * `0`: if `v1` is equal to `v2` | |
642 | * `1`: if `v1` is greater than `v2` | |
643 | ||
644 | Raises `ValueError` if versions are not well formated. | |
645 | """ | |
787e7624 | 646 | vregex = r"(?P<whole>\d+(\.(\d+))*)" |
4190fe1e RZ |
647 | v1m = re.match(vregex, v1) |
648 | v2m = re.match(vregex, v2) | |
649 | if v1m is None or v2m is None: | |
650 | raise ValueError("got a invalid version string") | |
651 | ||
652 | # Split values | |
787e7624 | 653 | v1g = v1m.group("whole").split(".") |
654 | v2g = v2m.group("whole").split(".") | |
4190fe1e RZ |
655 | |
656 | # Get the longest version string | |
657 | vnum = len(v1g) | |
658 | if len(v2g) > vnum: | |
659 | vnum = len(v2g) | |
660 | ||
661 | # Reverse list because we are going to pop the tail | |
662 | v1g.reverse() | |
663 | v2g.reverse() | |
664 | for _ in range(vnum): | |
665 | try: | |
666 | v1n = int(v1g.pop()) | |
667 | except IndexError: | |
668 | while v2g: | |
669 | v2n = int(v2g.pop()) | |
670 | if v2n > 0: | |
671 | return -1 | |
672 | break | |
673 | ||
674 | try: | |
675 | v2n = int(v2g.pop()) | |
676 | except IndexError: | |
677 | if v1n > 0: | |
678 | return 1 | |
679 | while v1g: | |
680 | v1n = int(v1g.pop()) | |
681 | if v1n > 0: | |
034237db | 682 | return 1 |
4190fe1e RZ |
683 | break |
684 | ||
685 | if v1n > v2n: | |
686 | return 1 | |
687 | if v1n < v2n: | |
688 | return -1 | |
689 | return 0 | |
690 | ||
787e7624 | 691 | |
f5612168 PG |
692 | def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None): |
693 | if ifaceaction: | |
787e7624 | 694 | str_ifaceaction = "no shutdown" |
f5612168 | 695 | else: |
787e7624 | 696 | str_ifaceaction = "shutdown" |
f5612168 | 697 | if vrf_name == None: |
787e7624 | 698 | cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format( |
699 | ifacename, str_ifaceaction | |
700 | ) | |
f5612168 | 701 | else: |
9fa6ec14 | 702 | cmd = ( |
703 | 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format( | |
704 | ifacename, vrf_name, str_ifaceaction | |
705 | ) | |
787e7624 | 706 | ) |
f5612168 PG |
707 | node.run(cmd) |
708 | ||
787e7624 | 709 | |
b220b3c8 PG |
710 | def ip4_route_zebra(node, vrf_name=None): |
711 | """ | |
712 | Gets an output of 'show ip route' command. It can be used | |
713 | with comparing the output to a reference | |
714 | """ | |
715 | if vrf_name == None: | |
787e7624 | 716 | tmp = node.vtysh_cmd("show ip route") |
b220b3c8 | 717 | else: |
787e7624 | 718 | tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name)) |
b220b3c8 | 719 | output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp) |
41077aa1 CF |
720 | |
721 | lines = output.splitlines() | |
722 | header_found = False | |
0eff5820 | 723 | while lines and (not lines[0].strip() or not header_found): |
5a3cf853 | 724 | if "o - offload failure" in lines[0]: |
41077aa1 CF |
725 | header_found = True |
726 | lines = lines[1:] | |
787e7624 | 727 | return "\n".join(lines) |
728 | ||
b220b3c8 | 729 | |
e394d9aa MS |
730 | def ip6_route_zebra(node, vrf_name=None): |
731 | """ | |
732 | Retrieves the output of 'show ipv6 route [vrf vrf_name]', then | |
733 | canonicalizes it by eliding link-locals. | |
734 | """ | |
735 | ||
736 | if vrf_name == None: | |
787e7624 | 737 | tmp = node.vtysh_cmd("show ipv6 route") |
e394d9aa | 738 | else: |
787e7624 | 739 | tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name)) |
e394d9aa MS |
740 | |
741 | # Mask out timestamp | |
742 | output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp) | |
743 | ||
744 | # Mask out the link-local addresses | |
787e7624 | 745 | output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output) |
e394d9aa MS |
746 | |
747 | lines = output.splitlines() | |
748 | header_found = False | |
749 | while lines and (not lines[0].strip() or not header_found): | |
5a3cf853 | 750 | if "o - offload failure" in lines[0]: |
e394d9aa MS |
751 | header_found = True |
752 | lines = lines[1:] | |
753 | ||
787e7624 | 754 | return "\n".join(lines) |
e394d9aa MS |
755 | |
756 | ||
2f726781 MW |
757 | def proto_name_to_number(protocol): |
758 | return { | |
787e7624 | 759 | "bgp": "186", |
760 | "isis": "187", | |
761 | "ospf": "188", | |
762 | "rip": "189", | |
763 | "ripng": "190", | |
764 | "nhrp": "191", | |
765 | "eigrp": "192", | |
766 | "ldp": "193", | |
767 | "sharp": "194", | |
768 | "pbr": "195", | |
769 | "static": "196", | |
d1b5fa5b | 770 | "ospf6": "197", |
787e7624 | 771 | }.get( |
772 | protocol, protocol | |
773 | ) # default return same as input | |
2f726781 MW |
774 | |
775 | ||
99a7a912 RZ |
776 | def ip4_route(node): |
777 | """ | |
778 | Gets a structured return of the command 'ip route'. It can be used in | |
4563e204 | 779 | conjunction with json_cmp() to provide accurate assert explanations. |
99a7a912 RZ |
780 | |
781 | Return example: | |
782 | { | |
783 | '10.0.1.0/24': { | |
784 | 'dev': 'eth0', | |
785 | 'via': '172.16.0.1', | |
786 | 'proto': '188', | |
787 | }, | |
788 | '10.0.2.0/24': { | |
789 | 'dev': 'eth1', | |
790 | 'proto': 'kernel', | |
791 | } | |
792 | } | |
793 | """ | |
787e7624 | 794 | output = normalize_text(node.run("ip route")).splitlines() |
99a7a912 RZ |
795 | result = {} |
796 | for line in output: | |
787e7624 | 797 | columns = line.split(" ") |
99a7a912 RZ |
798 | route = result[columns[0]] = {} |
799 | prev = None | |
800 | for column in columns: | |
787e7624 | 801 | if prev == "dev": |
802 | route["dev"] = column | |
803 | if prev == "via": | |
804 | route["via"] = column | |
805 | if prev == "proto": | |
2f726781 | 806 | # translate protocol names back to numbers |
787e7624 | 807 | route["proto"] = proto_name_to_number(column) |
808 | if prev == "metric": | |
809 | route["metric"] = column | |
810 | if prev == "scope": | |
811 | route["scope"] = column | |
99a7a912 RZ |
812 | prev = column |
813 | ||
814 | return result | |
815 | ||
787e7624 | 816 | |
def ip4_vrf_route(node):
    """
    Parse 'ip route show vrf {node}-cust1' output into a dict keyed by
    prefix, for use with json_cmp() so asserts produce accurate explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip route show vrf {0}-cust1".format(node.name))
    result = {}
    for line in normalize_text(raw).splitlines():
        tokens = line.split(" ")
        entry = result[tokens[0]] = {}
        previous = None
        # Each keyword is followed by its value token.
        for token in tokens:
            if previous == "dev":
                entry["dev"] = token
            elif previous == "via":
                entry["via"] = token
            elif previous == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(token)
            elif previous == "metric":
                entry["metric"] = token
            elif previous == "scope":
                entry["scope"] = token
            previous = token

    return result
859 | ||
860 | ||
99a7a912 RZ |
861 | def ip6_route(node): |
862 | """ | |
863 | Gets a structured return of the command 'ip -6 route'. It can be used in | |
4563e204 | 864 | conjunction with json_cmp() to provide accurate assert explanations. |
99a7a912 RZ |
865 | |
866 | Return example: | |
867 | { | |
868 | '2001:db8:1::/64': { | |
869 | 'dev': 'eth0', | |
870 | 'proto': '188', | |
871 | }, | |
872 | '2001:db8:2::/64': { | |
873 | 'dev': 'eth1', | |
874 | 'proto': 'kernel', | |
875 | } | |
876 | } | |
877 | """ | |
787e7624 | 878 | output = normalize_text(node.run("ip -6 route")).splitlines() |
99a7a912 RZ |
879 | result = {} |
880 | for line in output: | |
787e7624 | 881 | columns = line.split(" ") |
99a7a912 RZ |
882 | route = result[columns[0]] = {} |
883 | prev = None | |
884 | for column in columns: | |
787e7624 | 885 | if prev == "dev": |
886 | route["dev"] = column | |
887 | if prev == "via": | |
888 | route["via"] = column | |
889 | if prev == "proto": | |
2f726781 | 890 | # translate protocol names back to numbers |
787e7624 | 891 | route["proto"] = proto_name_to_number(column) |
892 | if prev == "metric": | |
893 | route["metric"] = column | |
894 | if prev == "pref": | |
895 | route["pref"] = column | |
99a7a912 RZ |
896 | prev = column |
897 | ||
898 | return result | |
899 | ||
787e7624 | 900 | |
def ip6_vrf_route(node):
    """
    Parse 'ip -6 route show vrf {node}-cust1' output into a dict keyed by
    prefix, for use with json_cmp() so asserts produce accurate explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip -6 route show vrf {0}-cust1".format(node.name))
    result = {}
    for line in normalize_text(raw).splitlines():
        tokens = line.split(" ")
        entry = result[tokens[0]] = {}
        previous = None
        # Each keyword is followed by its value token.
        for token in tokens:
            if previous == "dev":
                entry["dev"] = token
            elif previous == "via":
                entry["via"] = token
            elif previous == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(token)
            elif previous == "metric":
                entry["metric"] = token
            elif previous == "pref":
                entry["pref"] = token
            previous = token

    return result
941 | ||
942 | ||
9b7decf2 JU |
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    rules = []
    for line in normalize_text(node.run("ip rule")).splitlines():
        tokens = line.split(" ")
        # First token is the preference followed by ':' -- strip it off.
        rule = {"pref": tokens[0][:-1]}
        for keyword, value in zip(tokens, tokens[1:]):
            if keyword in ("from", "to", "proto", "iif", "fwmark"):
                rule[keyword] = value
        rules.append(rule)
    return rules
992 | ||
993 | ||
570f25d8 RZ |
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        message = "Sleeping for {} seconds".format(amount)
    else:
        message = reason + " ({} seconds)".format(amount)
    logger.info(message)

    time.sleep(amount)
1004 | ||
787e7624 | 1005 | |
def checkAddressSanitizerError(output, router, component, logdir=""):
    """Check `output` (and optionally per-daemon .asan files under `logdir`)
    for an AddressSanitizer report.

    If one is found it is written to stderr and appended to
    /tmp/AddressSanitzer.txt along with the calling test/procedure names,
    and True is returned; otherwise returns False.
    """

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log; the full report is bracketed by the
        # "==<pid>==" marker on both sides.
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back.
            # BUGFIX: dict.values() is not subscriptable on Python 3 --
            # materialize it into a list before indexing.
            testframe = list(sys._current_frames().values())[0]
            level = 0
            callingTest = "unknownTest"
            while level < 10 and testframe is not None:
                # Frames without __file__ (e.g. builtins/exec) are skipped.
                fname = testframe.f_globals.get("__file__", "")
                test = os.path.splitext(os.path.basename(fname))[0]
                if test and (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(fname)
                    break
                level = level + 1
                testframe = testframe.f_back

            # Now finding Calling Procedure: first frame that is not part of
            # the stop/check machinery below.
            callingProc = "unknownProc"
            for level in range(20):
                try:
                    frameProc = sys._getframe(level).f_code.co_name
                except ValueError:
                    # Walked past the bottom of the stack.
                    break
                if frameProc not in (
                    "processAddressSanitizerError",
                    "checkAddressSanitizerError",
                    "checkRouterCores",
                    "stopRouter",
                    "stop",
                    "stop_topology",
                    "checkRouterRunning",
                    "check_router_running",
                    "routers_have_failure",
                ):
                    # Found the calling procedure
                    callingProc = frameProc
                    break

            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False
4942f298 | 1103 | |
787e7624 | 1104 | |
49581587 CH |
1105 | def _sysctl_atleast(commander, variable, min_value): |
1106 | if isinstance(min_value, tuple): | |
1107 | min_value = list(min_value) | |
1108 | is_list = isinstance(min_value, list) | |
594b1259 | 1109 | |
49581587 CH |
1110 | sval = commander.cmd_raises("sysctl -n " + variable).strip() |
1111 | if is_list: | |
1112 | cur_val = [int(x) for x in sval.split()] | |
1113 | else: | |
1114 | cur_val = int(sval) | |
1115 | ||
1116 | set_value = False | |
1117 | if is_list: | |
1118 | for i, v in enumerate(cur_val): | |
1119 | if v < min_value[i]: | |
1120 | set_value = True | |
1121 | else: | |
1122 | min_value[i] = v | |
1123 | else: | |
1124 | if cur_val < min_value: | |
1125 | set_value = True | |
1126 | if set_value: | |
1127 | if is_list: | |
1128 | valstr = " ".join([str(x) for x in min_value]) | |
1129 | else: | |
1130 | valstr = str(min_value) | |
1131 | logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr) | |
a53c08bc | 1132 | commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr)) |
594b1259 | 1133 | |
787e7624 | 1134 | |
49581587 CH |
1135 | def _sysctl_assure(commander, variable, value): |
1136 | if isinstance(value, tuple): | |
1137 | value = list(value) | |
1138 | is_list = isinstance(value, list) | |
797e8dcf | 1139 | |
49581587 CH |
1140 | sval = commander.cmd_raises("sysctl -n " + variable).strip() |
1141 | if is_list: | |
1142 | cur_val = [int(x) for x in sval.split()] | |
1143 | else: | |
1144 | cur_val = sval | |
797e8dcf | 1145 | |
49581587 CH |
1146 | set_value = False |
1147 | if is_list: | |
1148 | for i, v in enumerate(cur_val): | |
1149 | if v != value[i]: | |
1150 | set_value = True | |
1151 | else: | |
1152 | value[i] = v | |
1153 | else: | |
1154 | if cur_val != str(value): | |
1155 | set_value = True | |
1156 | ||
1157 | if set_value: | |
1158 | if is_list: | |
1159 | valstr = " ".join([str(x) for x in value]) | |
1160 | else: | |
1161 | valstr = str(value) | |
1162 | logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr) | |
a53c08bc | 1163 | commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr)) |
49581587 CH |
1164 | |
1165 | ||
def sysctl_atleast(commander, variable, min_value, raises=False):
    """Best-effort wrapper for _sysctl_atleast().

    Failures are logged (with traceback) and swallowed unless `raises` is True.
    A None `commander` gets a fresh micronet Commander.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # Pass exc_info so the traceback is logged, matching sysctl_assure().
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
797e8dcf | 1180 | |
787e7624 | 1181 | |
49581587 CH |
def sysctl_assure(commander, variable, value, raises=False):
    """Best-effort wrapper for _sysctl_assure().

    Failures are logged (with traceback) and swallowed unless `raises` is True.
    A None `commander` gets a fresh micronet Commander.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1197 | ||
1198 | ||
def rlimit_atleast(rname, min_value, raises=False):
    """Raise the soft rlimit `rname` to at least `min_value`.

    The hard limit is raised too if it is below `min_value`. Failures are
    logged and swallowed unless `raises` is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        # RLIM_INFINITY is -1 and would compare below any positive minimum;
        # an unlimited soft limit never needs raising.
        if soft != resource.RLIM_INFINITY and soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except (ValueError, OSError):
        # BUGFIX: setrlimit raises ValueError/OSError, not the
        # subprocess.CalledProcessError this previously caught -- the
        # best-effort (raises=False) behavior never actually applied.
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1213 | ||
1214 | ||
def fix_netns_limits(ns):
    """Apply the sysctl settings topotests expect inside namespace `ns`.

    Uses sysctl_atleast/sysctl_assure with their default best-effort behavior
    (failures are logged, not raised).
    """

    # Maximum read and write socket buffer sizes
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])

    # Disable reverse-path filtering so asymmetric test topologies work.
    sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)

    sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)

    # ARP
    sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    # This knob only exists on kernels >= 4.20.
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
1257 | ||
1258 | ||
def fix_host_limits():
    """Increase system limits.

    Raises process rlimits and host-wide sysctls (files, core dumps, socket
    buffers, neighbor-table GC, routing-table sizes) to values large enough
    for big topologies. All calls are best-effort.
    """

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    sysctl_atleast(None, "fs.file-max", 16 * 1024)
    sysctl_atleast(None, "kernel.pty.max", 16 * 1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
    sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)

    # Garbage Collection Settings for ARP and Neighbors
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
    # Hold entries for 10 minutes
    sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    # igmp
    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
    sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
49581587 CH |
1299 | |
1300 | ||
def setup_node_tmpdir(logdir, name):
    """Prepare the per-node log directory for node `name` under `logdir`.

    Removes stale valgrind/asan/log artifacts, (re)creates a world-writable
    per-node directory, and returns the path of the node's logfile.
    """
    # Cleanup old log, valgrind, and core files.
    # BUGFIX: the asan glob previously lacked the "{0}/" prefix, so it was
    # matched relative to the current working directory instead of logdir.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {0}/{1}.*.asan {0}/{1}/".format(logdir, name),
        shell=True,
    )

    # Setup the per node directory. Mode 1777 so daemons running as other
    # users can write their logs there.
    nodelogdir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
    )
    logfile = "{0}/{1}.log".format(logdir, name)
    return logfile
797e8dcf | 1314 | |
594b1259 MW |
1315 | |
1316 | class Router(Node): | |
622c4996 | 1317 | "A Node with IPv4/IPv6 forwarding enabled" |
594b1259 | 1318 | |
    def __init__(self, name, **params):
        """Create a Router node.

        Recognized `params`: `logdir` (defaults to the run directory's log
        path), `logger` (created here if absent). Remaining params go to the
        base Node. Config defaults are read from ../pytest.ini relative to
        this file.
        """

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        # Daemon bookkeeping: 0/1 flags per daemon plus per-daemon CLI options.
        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.unified_config = None
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "pim6d": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        # Command prefix to enter this node's namespaces from the host.
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            # No /proc/1/cgroup -- not in a cgroup-enabled environment.
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
1390 | ||
edd2bdf6 RZ |
1391 | def _config_frr(self, **params): |
1392 | "Configure FRR binaries" | |
787e7624 | 1393 | self.daemondir = params.get("frrdir") |
edd2bdf6 | 1394 | if self.daemondir is None: |
787e7624 | 1395 | self.daemondir = self.config_defaults.get("topogen", "frrdir") |
edd2bdf6 | 1396 | |
787e7624 | 1397 | zebra_path = os.path.join(self.daemondir, "zebra") |
edd2bdf6 RZ |
1398 | if not os.path.isfile(zebra_path): |
1399 | raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path)) | |
1400 | ||
2ab85530 RZ |
1401 | # pylint: disable=W0221 |
1402 | # Some params are only meaningful for the parent class. | |
594b1259 MW |
1403 | def config(self, **params): |
1404 | super(Router, self).config(**params) | |
1405 | ||
2ab85530 | 1406 | # User did not specify the daemons directory, try to autodetect it. |
787e7624 | 1407 | self.daemondir = params.get("daemondir") |
2ab85530 | 1408 | if self.daemondir is None: |
787e7624 | 1409 | self.routertype = params.get( |
1410 | "routertype", self.config_defaults.get("topogen", "routertype") | |
1411 | ) | |
622c4996 | 1412 | self._config_frr(**params) |
594b1259 | 1413 | else: |
2ab85530 | 1414 | # Test the provided path |
787e7624 | 1415 | zpath = os.path.join(self.daemondir, "zebra") |
2ab85530 | 1416 | if not os.path.isfile(zpath): |
787e7624 | 1417 | raise Exception("No zebra binary found in {}".format(zpath)) |
2ab85530 | 1418 | # Allow user to specify routertype when the path was specified. |
787e7624 | 1419 | if params.get("routertype") is not None: |
1420 | self.routertype = params.get("routertype") | |
2ab85530 | 1421 | |
594b1259 | 1422 | # Set ownership of config files |
787e7624 | 1423 | self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype)) |
2ab85530 | 1424 | |
    def terminate(self):
        """Stop FRR daemons, tear down the node, and open up the log files."""
        # Stop running FRR daemons
        self.stopRouter()
        super(Router, self).terminate()
        # Logs are written as root; make them readable by the invoking user.
        os.system("chmod -R go+rw " + self.logdir)
b0f0d980 | 1430 | |
cf865d1b | 1431 | # Return count of running daemons |
f033a78a DL |
1432 | def listDaemons(self): |
1433 | ret = [] | |
a53c08bc CH |
1434 | rc, stdout, _ = self.cmd_status( |
1435 | "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False | |
1436 | ) | |
49581587 CH |
1437 | if rc: |
1438 | return ret | |
1439 | for d in stdout.strip().split("\n"): | |
1440 | pidfile = d.strip() | |
1441 | try: | |
1442 | pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip()) | |
1443 | name = os.path.basename(pidfile[:-4]) | |
1444 | ||
1445 | # probably not compatible with bsd. | |
1446 | rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False) | |
1447 | if rc: | |
a53c08bc CH |
1448 | logger.warning( |
1449 | "%s: %s exited leaving pidfile %s (%s)", | |
1450 | self.name, | |
1451 | name, | |
1452 | pidfile, | |
1453 | pid, | |
1454 | ) | |
49581587 CH |
1455 | self.cmd("rm -- " + pidfile) |
1456 | else: | |
1457 | ret.append((name, pid)) | |
1458 | except (subprocess.CalledProcessError, ValueError): | |
1459 | pass | |
f033a78a | 1460 | return ret |
cf865d1b | 1461 | |
    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        Sends SIGTERM, waits up to ~15s (30 x 0.5s), then SIGBUSes any
        stragglers (to force a core) and checks for cores/errors. Returns the
        error string (possibly empty); asserts on errors when `assertOnError`
        and the router version is >= `minErrorVersion`.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            # Poll for graceful exit.
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        # Anything still alive gets SIGBUS so it dumps core for analysis.
        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            assert "Errors found - details follow:" == 0, errors
        return errors
f76774ec | 1514 | |
594b1259 MW |
1515 | def removeIPs(self): |
1516 | for interface in self.intfNames(): | |
49581587 CH |
1517 | try: |
1518 | self.intf_ip_cmd(interface, "ip address flush " + interface) | |
1519 | except Exception as ex: | |
1520 | logger.error("%s can't remove IPs %s", self, str(ex)) | |
1521 | # pdb.set_trace() | |
1522 | # assert False, "can't remove IPs %s" % str(ex) | |
8dd5077d PG |
1523 | |
1524 | def checkCapability(self, daemon, param): | |
1525 | if param is not None: | |
1526 | daemon_path = os.path.join(self.daemondir, daemon) | |
787e7624 | 1527 | daemon_search_option = param.replace("-", "") |
1528 | output = self.cmd( | |
1529 | "{0} -h | grep {1}".format(daemon_path, daemon_search_option) | |
1530 | ) | |
8dd5077d PG |
1531 | if daemon_search_option not in output: |
1532 | return False | |
1533 | return True | |
1534 | ||
    def loadConf(self, daemon, source=None, param=None):
        """Enabled and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.

        `daemon` may also be "frr" to enable unified (integrated) config mode.
        `param` sets extra command-line options for the daemon.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                # Bare file name: retry relative to <script_dir>/<router>/.
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.info(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            if daemon == "frr":
                # Unified config: everything goes into frr.conf.
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                # In unified mode only frr.conf itself gets (re)created empty.
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons
e1dfa45e | 1595 | |
    def runInWindow(self, cmd, title=None):
        """Backward-compatible wrapper around run_in_window()."""
        return self.run_in_window(cmd, title)
3f950192 | 1598 | |
    def startRouter(self, tgen=None):
        """Prepare configuration and start all enabled daemons.

        Returns an error/skip message string when prerequisites are missing
        (ldpd/eigrpd/bfdd binary, kernel version, MPLS modules); otherwise
        returns the status from startRouterDaemons(). In unified-config mode
        frr.conf is applied via vtysh after the daemons start.
        """
        if self.unified_config:
            self.cmd(
                'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )
        else:
            # Disable integrated-vtysh-config
            self.cmd(
                'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )

        self.cmd(
            "chown %s:%svty /etc/%s/vtysh.conf"
            % (self.routertype, self.routertype, self.routertype)
        )
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons["ldpd"] == 1:
            ldpd_path = os.path.join(self.daemondir, "ldpd")
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), "4.5") < 0:
                logger.info("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"
            # Check if have mpls
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info(
                        "LDP/MPLS Tests will be skipped, platform missing module(s)"
                    )
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if not module_present("mpls-router"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-router kernel module)"
                    )
                elif not module_present("mpls-iptunnel"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-iptunnel kernel module)"
                    )
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"

        # Really want to use sysctl_atleast here, but only when MPLS is actually being
        # used
        self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

        # Optionally pop up a shell window for routers named in --shell.
        shell_routers = g_extra_config["shell"]
        if "all" in shell_routers or self.name in shell_routers:
            self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)

        if self.daemons["eigrpd"] == 1:
            eigrpd_path = os.path.join(self.daemondir, "eigrpd")
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons["bfdd"] == 1:
            bfdd_path = os.path.join(self.daemondir, "bfdd")
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        status = self.startRouterDaemons(tgen=tgen)

        # Optionally pop up a vtysh window for routers named in --vtysh.
        vtysh_routers = g_extra_config["vtysh"]
        if "all" in vtysh_routers or self.name in vtysh_routers:
            self.run_in_window("vtysh", title="vt-%s" % self.name)

        if self.unified_config:
            self.cmd("vtysh -f /etc/frr/frr.conf")

        return status
aa5261bf | 1687 | |
aa5261bf RZ |
1688 | def getStdErr(self, daemon): |
1689 | return self.getLog("err", daemon) | |
1690 | ||
1691 | def getStdOut(self, daemon): | |
1692 | return self.getLog("out", daemon) | |
1693 | ||
1694 | def getLog(self, log, daemon): | |
1695 | return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log)) | |
1696 | ||
    def startRouterDaemons(self, daemons=None, tgen=None):
        """Starts FRR daemons for this router.

        If `daemons` is given, start only those daemons; otherwise start every
        daemon enabled in self.daemons.  zebra, staticd and snmpd are started
        first (in that order) because the other daemons depend on them.
        Returns "" on success or an error message string.
        """

        # Debug/instrumentation knobs supplied on the pytest command line.
        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        # Derive the FRR version from "bgpd -v" output if not known yet.
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        def start_daemon(daemon, extra_opts=None):
            """Assemble env + binary + options for one daemon and launch it.

            Honors valgrind/strace/gdb instrumentation options; snmpd is a
            special case as it is a system binary, not an FRR daemon.
            """
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
            else:
                binary = os.path.join(self.daemondir, daemon)

                # Always set ASAN options so a sanitizer-enabled build logs
                # into per-daemon .asan files under the router's log dir.
                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    cmdenv = "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    # NOTE: strace replaces (not extends) the ASAN env prefix.
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
                    daemon_opts, daemon
                )
            if extra_opts:
                cmdopt += " " + extra_opts

            # Launch under gdb when this router/daemon matches the user's
            # --gdb-routers/--gdb-daemons selection ("all" matches anything).
            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            else:
                if daemon != "snmpd":
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start Zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zerbra a chance to configure interface addresses that snmpd daemon
            # may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        if daemons is None:
            # Fix Link-Local Addresses on initial startup
            # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
            _, output, _ = self.cmd_status(
                "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
                stderr=subprocess.STDOUT,
            )
            logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        return ""
1883 | ||
c39fe454 KK |
1884 | def killRouterDaemons( |
1885 | self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1" | |
1886 | ): | |
622c4996 | 1887 | # Kill Running FRR |
c65a7e26 | 1888 | # Daemons(user specified daemon only) using SIGKILL |
c39fe454 | 1889 | rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype) |
c65a7e26 KK |
1890 | errors = "" |
1891 | daemonsNotRunning = [] | |
1892 | if re.search(r"No such file or directory", rundaemons): | |
1893 | return errors | |
1894 | for daemon in daemons: | |
1895 | if rundaemons is not None and daemon in rundaemons: | |
1896 | numRunning = 0 | |
701a0192 | 1897 | dmns = rundaemons.split("\n") |
cd79342c MS |
1898 | # Exclude empty string at end of list |
1899 | for d in dmns[:-1]: | |
c65a7e26 | 1900 | if re.search(r"%s" % daemon, d): |
6c5045ce MW |
1901 | daemonpidfile = d.rstrip() |
1902 | daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip() | |
c39fe454 KK |
1903 | if daemonpid.isdigit() and pid_exists(int(daemonpid)): |
1904 | logger.info( | |
1905 | "{}: killing {}".format( | |
1906 | self.name, | |
6c5045ce | 1907 | os.path.basename(daemonpidfile.rsplit(".", 1)[0]), |
c39fe454 KK |
1908 | ) |
1909 | ) | |
6c5045ce | 1910 | os.kill(int(daemonpid), signal.SIGKILL) |
c65a7e26 KK |
1911 | if pid_exists(int(daemonpid)): |
1912 | numRunning += 1 | |
c9f92703 | 1913 | while wait and numRunning > 0: |
c39fe454 KK |
1914 | sleep( |
1915 | 2, | |
1916 | "{}: waiting for {} daemon to be stopped".format( | |
1917 | self.name, daemon | |
1918 | ), | |
1919 | ) | |
cd79342c | 1920 | |
c65a7e26 | 1921 | # 2nd round of kill if daemons didn't exit |
cd79342c | 1922 | for d in dmns[:-1]: |
c65a7e26 | 1923 | if re.search(r"%s" % daemon, d): |
c39fe454 KK |
1924 | daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() |
1925 | if daemonpid.isdigit() and pid_exists( | |
1926 | int(daemonpid) | |
1927 | ): | |
1928 | logger.info( | |
1929 | "{}: killing {}".format( | |
1930 | self.name, | |
1931 | os.path.basename( | |
1932 | d.rstrip().rsplit(".", 1)[0] | |
1933 | ), | |
1934 | ) | |
1935 | ) | |
6c5045ce | 1936 | os.kill(int(daemonpid), signal.SIGKILL) |
c9f92703 DS |
1937 | if daemonpid.isdigit() and not pid_exists( |
1938 | int(daemonpid) | |
1939 | ): | |
1940 | numRunning -= 1 | |
6c5045ce | 1941 | self.cmd("rm -- {}".format(daemonpidfile)) |
c65a7e26 KK |
1942 | if wait: |
1943 | errors = self.checkRouterCores(reportOnce=True) | |
c39fe454 KK |
1944 | if self.checkRouterVersion("<", minErrorVersion): |
1945 | # ignore errors in old versions | |
c65a7e26 KK |
1946 | errors = "" |
1947 | if assertOnError and len(errors) > 0: | |
1948 | assert "Errors found - details follow:" == 0, errors | |
c65a7e26 KK |
1949 | else: |
1950 | daemonsNotRunning.append(daemon) | |
1951 | if len(daemonsNotRunning) > 0: | |
c39fe454 | 1952 | errors = errors + "Daemons are not running", daemonsNotRunning |
c65a7e26 KK |
1953 | |
1954 | return errors | |
1955 | ||
2a59a86b LB |
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Scan all enabled daemons for crashes, leaks and sanitizer errors.

        Looks for core dump files (reporting a gdb backtrace), "memstats"
        memory-leak output on stderr (when `reportLeaks`), and
        AddressSanitizer errors.  With `reportOnce`, reports only once per
        run (tracked via self.reportCores).  Returns the accumulated trace
        text ("" when nothing was found).
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        # Reformat the memstats dump into readable markdown-ish text.
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n ## \1",
                            log,
                        )
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            # Suppress duplicate reporting for subsequent reportOnce calls.
            self.reportCores = False
        return traces
f76774ec | 2009 | |
    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo they don't run"

        global fatal_error

        # "show logging" prints one "Logging configuration for <daemon>" line
        # per live daemon; its output doubles as the liveness roster.
        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            # snmpd is not an FRR daemon and never shows in vtysh output.
            if daemon == "snmpd":
                continue
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                # Return on the FIRST dead daemon found; remaining daemons
                # are not inspected.
                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""
fb80b81b LB |
2071 | |
2072 | def checkRouterVersion(self, cmpop, version): | |
2073 | """ | |
2074 | Compares router version using operation `cmpop` with `version`. | |
2075 | Valid `cmpop` values: | |
2076 | * `>=`: has the same version or greater | |
2077 | * '>': has greater version | |
2078 | * '=': has the same version | |
2079 | * '<': has a lesser version | |
2080 | * '<=': has the same version or lesser | |
2081 | ||
2082 | Usage example: router.checkRouterVersion('>', '1.0') | |
2083 | """ | |
6bfe4b8b MW |
2084 | |
2085 | # Make sure we have version information first | |
2086 | if self.version == None: | |
787e7624 | 2087 | self.version = self.cmd( |
2088 | os.path.join(self.daemondir, "bgpd") + " -v" | |
2089 | ).split()[2] | |
2090 | logger.info("{}: running version: {}".format(self.name, self.version)) | |
6bfe4b8b | 2091 | |
fb80b81b | 2092 | rversion = self.version |
11761ab0 | 2093 | if rversion == None: |
fb80b81b LB |
2094 | return False |
2095 | ||
2096 | result = version_cmp(rversion, version) | |
787e7624 | 2097 | if cmpop == ">=": |
fb80b81b | 2098 | return result >= 0 |
787e7624 | 2099 | if cmpop == ">": |
fb80b81b | 2100 | return result > 0 |
787e7624 | 2101 | if cmpop == "=": |
fb80b81b | 2102 | return result == 0 |
787e7624 | 2103 | if cmpop == "<": |
fb80b81b | 2104 | return result < 0 |
787e7624 | 2105 | if cmpop == "<": |
fb80b81b | 2106 | return result < 0 |
787e7624 | 2107 | if cmpop == "<=": |
fb80b81b LB |
2108 | return result <= 0 |
2109 | ||
594b1259 MW |
2110 | def get_ipv6_linklocal(self): |
2111 | "Get LinkLocal Addresses from interfaces" | |
2112 | ||
2113 | linklocal = [] | |
2114 | ||
787e7624 | 2115 | ifaces = self.cmd("ip -6 address") |
594b1259 | 2116 | # Fix newlines (make them all the same) |
787e7624 | 2117 | ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines() |
2118 | interface = "" | |
2119 | ll_per_if_count = 0 | |
594b1259 | 2120 | for line in ifaces: |
fd03dacd | 2121 | m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line) |
594b1259 MW |
2122 | if m: |
2123 | interface = m.group(1) | |
2124 | ll_per_if_count = 0 | |
787e7624 | 2125 | m = re.search( |
2126 | "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link", | |
2127 | line, | |
2128 | ) | |
594b1259 MW |
2129 | if m: |
2130 | local = m.group(1) | |
2131 | ll_per_if_count += 1 | |
787e7624 | 2132 | if ll_per_if_count > 1: |
594b1259 MW |
2133 | linklocal += [["%s-%s" % (interface, ll_per_if_count), local]] |
2134 | else: | |
2135 | linklocal += [[interface, local]] | |
2136 | return linklocal | |
787e7624 | 2137 | |
80eeefb7 MW |
2138 | def daemon_available(self, daemon): |
2139 | "Check if specified daemon is installed (and for ldp if kernel supports MPLS)" | |
2140 | ||
2ab85530 RZ |
2141 | daemon_path = os.path.join(self.daemondir, daemon) |
2142 | if not os.path.isfile(daemon_path): | |
80eeefb7 | 2143 | return False |
787e7624 | 2144 | if daemon == "ldpd": |
2145 | if version_cmp(platform.release(), "4.5") < 0: | |
b431b554 | 2146 | return False |
787e7624 | 2147 | if not module_present("mpls-router", load=False): |
80eeefb7 | 2148 | return False |
787e7624 | 2149 | if not module_present("mpls-iptunnel", load=False): |
b431b554 | 2150 | return False |
80eeefb7 | 2151 | return True |
f2d6ce41 | 2152 | |
80eeefb7 | 2153 | def get_routertype(self): |
622c4996 | 2154 | "Return the type of Router (frr)" |
80eeefb7 MW |
2155 | |
2156 | return self.routertype | |
787e7624 | 2157 | |
50c40bde MW |
    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        # The report file is opened lazily on the first leak found and stays
        # open (append mode) across daemons; it is closed once at the end.
        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                    leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    # Reformat the memstats dump into markdown sections.
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats: ", " ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
80eeefb7 | 2195 | |
787e7624 | 2196 | |
def frr_unicode(s):
    """Convert string to unicode, depending on python version"""
    if sys.version_info[0] <= 2:
        return unicode(s)  # pylint: disable=E0602
    return s
c8e5983d CH |
2203 | |
2204 | ||
def is_mapping(o):
    """Return True when *o* implements the Mapping (dict-like) protocol."""
    return isinstance(o, Mapping)