#!/usr/bin/env python
# SPDX-License-Identifier: ISC

#
# topotest.py
# Library of helper functions for NetDEF Topology Tests
#
# Copyright (c) 2016 by
# Network Device Education Foundation, Inc. ("NetDEF")
#

import difflib
import errno
import functools
import glob
import json
import os
import pdb
import platform
import re
import resource
import signal
import subprocess
import sys
import tempfile
import time
from copy import deepcopy

import lib.topolog as topolog
from lib.topolog import logger

if sys.version_info[0] > 2:
    import configparser
    from collections.abc import Mapping
else:
    import ConfigParser as configparser
    from collections import Mapping

from lib import micronet
from lib.micronet_compat import Node

g_extra_config = {}


def get_logs_path(rundir):
    logspath = topolog.get_test_logdir()
    return os.path.join(rundir, logspath)


def gdb_core(obj, daemon, corefiles):
    gdbcmds = """
        info threads
        bt full
        disassemble
        up
        disassemble
        up
        disassemble
        up
        disassemble
        up
        disassemble
        up
        disassemble
    """
    gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")]
    gdbcmds = [item for sl in gdbcmds for item in sl]

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace


class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        for line in error.splitlines():
            self.errors.append(line)

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return len(self.errors) > 0

    def gen_report(self):
        headline = ["Generated JSON diff error report:", ""]
        return headline + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )


def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an
    error report suited for human reading.
    """

    def dump_json(v):
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        if isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        elif isinstance(v, bool):
            # bool is a subclass of int, so it must be tested before Number
            return "Boolean"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc


def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. There are special
    parameters and notations explained below which can be used to cover rather unusual
    cases:

    * when 'exact' is set to 'True' then d1 and d2 are tested for equality (including
      order within JSON Arrays)
    * using 'null' (or 'None' in Python) as a JSON Object value checks for key
      absence in d1
    * using '*' as a JSON Object value or Array value checks for presence in d1
      without checking the values
    * using '__ordered__' as the first element of a JSON Array in d2 will also check
      the order when it is compared to an Array in d1
    """

    (errors_n, errors) = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if errors_n > 0:
        result = json_cmp_result()
        result.add_error(errors)
        return result
    else:
        return None


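# Illustrative example (editor's addition, not part of the upstream library): a
# minimal demonstration of json_cmp's matching notations on plain dictionaries.
# It needs no router, only the notations documented in the docstring above.
def _json_cmp_example():
    got = {"vrf": "default", "routes": [{"prefix": "10.0.1.0/24", "metric": 20}]}
    # Subset match: extra keys in d1 ("metric") are ignored; "*" matches any value.
    assert json_cmp(got, {"vrf": "*", "routes": [{"prefix": "10.0.1.0/24"}]}) is None
    # 'None' as a value asserts key absence, so this returns an error report.
    assert json_cmp(got, {"vrf": None}) is not None
    # '__ordered__' enforces Array order; the reversed list does not match.
    assert json_cmp([1, 2], ["__ordered__", 2, 1]) is not None

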
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    return difflines(
        normalize_text(router.vtysh_cmd(cmd)),
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )


def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compares it with the `data` contents.
    """
    return json_cmp(router.vtysh_cmd(cmd, isjson=True), data, exact)


def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Try up to `count` times,
    waiting `wait` seconds between tries. By default it tries 20 times with a
    3 second delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    func_name = "<unknown>"
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Just a safety-check to avoid running topotests with very
    # small wait/count arguments.
    wait_time = wait * count
    if wait_time < 5:
        assert (
            wait_time >= 5
        ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
            count, wait
        )

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    while count > 0:
        result = func()
        if result != what:
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)


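# Illustrative example (editor's addition, not part of the upstream library):
# the common polling pattern built on run_and_expect. `router` is a hypothetical
# topogen router; functools.partial turns the comparison into the zero-argument
# callable the poller expects.
def _run_and_expect_example(router):
    test_func = functools.partial(
        router_json_cmp, router, "show ip route json", {"10.0.1.0/24": [{}]}
    )
    # Poll up to 10 times, 2 seconds apart, until router_json_cmp returns None.
    success, result = run_and_expect(test_func, None, count=10, wait=2)
    assert success, "route never appeared:\n{}".format(result)

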
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result type with `etype`. Try up to `count`
    times, waiting `wait` seconds between tries. By default it tries 20 times
    with a 3 second delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    func_name = "<unknown>"
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Just a safety-check to avoid running topotests with very
    # small wait/count arguments.
    wait_time = wait * count
    if wait_time < 5:
        assert (
            wait_time >= 5
        ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
            count, wait
        )

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    while count > 0:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
            time.sleep(wait)
            count -= 1
            continue

        if etype != type(None) and avalue is not None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)


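# Illustrative example (editor's addition): run_and_expect_type checks the return
# type first and, when `avalue` is given, the value as well. The lambda stands in
# for any probe function.
def _run_and_expect_type_example():
    success, result = run_and_expect_type(
        lambda: "up", str, count=2, wait=3, avalue="up"
    )
    assert success and result == "up"

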
def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compares it with the `data` contents. Retries by default for 10 seconds.
    """

    def test_func():
        return router_json_cmp(router, cmd, data, exact)

    ok, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
    return ok


def int2dpid(dpid):
    "Convert an integer to a DPID string"

    try:
        dpid = hex(dpid)[2:]
        dpid = "0" * (16 - len(dpid)) + dpid
        return dpid
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )


def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    try:
        os.waitpid(pid, os.WNOHANG)
    except:
        pass
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True


def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns an empty string if the texts are the same, otherwise a formatted diff"

    diff = "\n".join(
        difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    )
    # Clean up line endings
    diff = os.linesep.join([s for s in diff.splitlines() if s])
    return diff


def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff that handles the string-to-line transformations."
    text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(1)
    text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(1)
    return get_textdiff(text1, text2, title1, title2, **opts)


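# Illustrative example (editor's addition): an empty string from difflines means
# the two texts match; otherwise a unified diff labeled with the titles is returned.
def _difflines_example():
    current = "interface eth0\n ip address 10.0.0.1/24\n"
    expected = "interface eth0\n ip address 10.0.0.2/24\n"
    diff = difflines(current, expected, title1="current", title2="expected")
    assert diff != ""  # the addresses differ, so a diff is produced

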
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    if isinstance(content, list) or isinstance(content, tuple):
        content = "\n".join(content)
    fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
    fname = fde.name
    fde.write(content)
    fde.close()
    return fname


def normalize_text(text):
    """
    Strips formatting spaces/tabs, carriage returns and trailing whitespace.
    """
    text = re.sub(r"[ \t]+", " ", text)
    text = re.sub(r"\r", "", text)

    # Remove whitespace at the end of each line.
    text = re.sub(r"[ \t]+\n", "\n", text)
    # Remove whitespace at the end of the text.
    text = text.rstrip()

    return text


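# Illustrative example (editor's addition): runs of spaces/tabs collapse to a
# single space and trailing whitespace is dropped, so vtysh outputs diff cleanly.
def _normalize_text_example():
    assert normalize_text("a\t \tb   \r\nc  ") == "a b\nc"

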
def is_linux():
    """
    Parses the uname output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """

    if os.uname()[0] == "Linux":
        return True
    return False


def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if the capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
            )
            iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]

            if iproute2_err != "Error:":
                return True
        except Exception:
            pass
    return False


def iproute2_is_fdb_get_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling `bridge fdb get` commands to query neigh table resolution.

    Returns True if the capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["bridge", "fdb", "get", "help"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
            )
            iproute2_out = subp.communicate()[1].splitlines()[0].split()[0]

            if "Usage" in str(iproute2_out):
                return True
        except Exception:
            pass
    return False


def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    with open("/proc/modules", "r") as modules_file:
        if module.replace("-", "_") in modules_file.read():
            return True
    cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module)
    if os.system(cmd) != 0:
        return False
    else:
        return True


def module_present_freebsd(module, load):
    return True


def module_present(module, load=True):
    if sys.platform.startswith("linux"):
        return module_present_linux(module, load)
    elif sys.platform.startswith("freebsd"):
        return module_present_freebsd(module, load)


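# Illustrative example (editor's addition): probing for the MPLS kernel modules
# the way startRouter() does; load=False makes modprobe dry-run (-n) instead of
# actually loading the module.
def _module_present_example():
    if not module_present("mpls-router", load=False):
        logger.info("mpls-router module not present; MPLS tests would be skipped")

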
def version_cmp(v1, v2):
    """
    Compare two version strings and return:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formatted.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got an invalid version string")

    # Split values
    v1g = v1m.group("whole").split(".")
    v2g = v2m.group("whole").split(".")

    # Get the longest version string
    vnum = len(v1g)
    if len(v2g) > vnum:
        vnum = len(v2g)

    # Reverse list because we are going to pop the tail
    v1g.reverse()
    v2g.reverse()
    for _ in range(vnum):
        try:
            v1n = int(v1g.pop())
        except IndexError:
            while v2g:
                v2n = int(v2g.pop())
                if v2n > 0:
                    return -1
            break

        try:
            v2n = int(v2g.pop())
        except IndexError:
            if v1n > 0:
                return 1
            while v1g:
                v1n = int(v1g.pop())
                if v1n > 0:
                    return 1
            break

        if v1n > v2n:
            return 1
        if v1n < v2n:
            return -1
    return 0


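# Illustrative example (editor's addition): missing components compare as zero,
# and the comparison is numeric rather than lexicographic.
def _version_cmp_example():
    assert version_cmp("4.5", "4.5.0") == 0
    assert version_cmp("4.20", "4.5") == 1
    assert version_cmp(platform.release(), "4.5") in (-1, 0, 1)

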
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    if ifaceaction:
        str_ifaceaction = "no shutdown"
    else:
        str_ifaceaction = "shutdown"
    if vrf_name is None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, str_ifaceaction
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, str_ifaceaction
            )
        )
    node.run(cmd)


def ip4_route_zebra(node, vrf_name=None):
    """
    Gets the output of the 'show ip route' command. It can be used to
    compare the output against a reference.
    """
    if vrf_name is None:
        tmp = node.vtysh_cmd("show ip route")
    else:
        tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)

    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]
    return "\n".join(lines)


def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """

    if vrf_name is None:
        tmp = node.vtysh_cmd("show ipv6 route")
    else:
        tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)

    # Mask out the link-local addresses
    output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)

    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]

    return "\n".join(lines)


def proto_name_to_number(protocol):
    return {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
        "ospf6": "197",
    }.get(
        protocol, protocol
    )  # default return same as input


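# Illustrative example (editor's addition): these are the protocol numbers FRR
# daemons stamp on kernel routes; unknown names pass through unchanged, which is
# how ip4_route() below keeps values like "kernel" intact.
def _proto_name_to_number_example():
    assert proto_name_to_number("ospf") == "188"
    assert proto_name_to_number("kernel") == "kernel"

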
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run("ip route")).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "scope":
                route["scope"] = column
            prev = column

    return result


def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(
        node.run("ip route show vrf {0}-cust1".format(node.name))
    ).splitlines()

    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "scope":
                route["scope"] = column
            prev = column

    return result


def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run("ip -6 route")).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "pref":
                route["pref"] = column
            prev = column

    return result


def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(
        node.run("ip -6 route show vrf {0}-cust1".format(node.name))
    ).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "pref":
                route["pref"] = column
            prev = column

    return result


def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0",
            "from": "all"
        },
        {
            "pref": "32766",
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    output = normalize_text(node.run("ip rule")).splitlines()
    result = []
    for line in output:
        columns = line.split(" ")

        route = {}
        # remove the last character, since it is ':'
        pref = columns[0][:-1]
        route["pref"] = pref
        prev = None
        for column in columns:
            if prev == "from":
                route["from"] = column
            if prev == "to":
                route["to"] = column
            if prev == "proto":
                route["proto"] = column
            if prev == "iif":
                route["iif"] = column
            if prev == "fwmark":
                route["fwmark"] = column
            prev = column

        result.append(route)
    return result


def sleep(amount, reason=None):
    """
    Sleep wrapper that logs the amount of sleep.
    """
    if reason is None:
        logger.info("Sleeping for {} seconds".format(amount))
    else:
        logger.info(reason + " ({} seconds)".format(amount))

    time.sleep(amount)


def checkAddressSanitizerError(output, router, component, logdir=""):
    "Checks for AddressSanitizer errors in output. If found, logs them and returns True, False otherwise."

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find the calling test. Could be multiple steps back.
            # Note: values() is not subscriptable on Python 3, hence the list().
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now find the calling procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't find the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No AddressSanitizer error in output. Now check for an AddressSanitizer daemon file.
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False


def _sysctl_atleast(commander, variable, min_value):
    if isinstance(min_value, tuple):
        min_value = list(min_value)
    is_list = isinstance(min_value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    if is_list:
        cur_val = [int(x) for x in sval.split()]
    else:
        cur_val = int(sval)

    set_value = False
    if is_list:
        for i, v in enumerate(cur_val):
            if v < min_value[i]:
                set_value = True
            else:
                min_value[i] = v
    else:
        if cur_val < min_value:
            set_value = True
    if set_value:
        if is_list:
            valstr = " ".join([str(x) for x in min_value])
        else:
            valstr = str(min_value)
        logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))


def _sysctl_assure(commander, variable, value):
    if isinstance(value, tuple):
        value = list(value)
    is_list = isinstance(value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    if is_list:
        cur_val = [int(x) for x in sval.split()]
    else:
        cur_val = sval

    set_value = False
    if is_list:
        for i, v in enumerate(cur_val):
            if v != value[i]:
                set_value = True
            else:
                value[i] = v
    else:
        if cur_val != str(value):
            set_value = True

    if set_value:
        if is_list:
            valstr = " ".join([str(x) for x in value])
        else:
            valstr = str(value)
        logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))


def sysctl_atleast(commander, variable, min_value, raises=False):
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError as error:
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
        )
        if raises:
            raise


def sysctl_assure(commander, variable, value, raises=False):
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError as error:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise


def rlimit_atleast(rname, min_value, raises=False):
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except subprocess.CalledProcessError as error:
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise


def fix_netns_limits(ns):

    # Maximum read and write socket buffer sizes
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])

    sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)

    sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)

    # ARP
    sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)


def fix_host_limits():
    """Increase system limits."""

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    sysctl_atleast(None, "fs.file-max", 16 * 1024)
    sysctl_atleast(None, "kernel.pty.max", 16 * 1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
    sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)

    # Garbage collection settings for ARP and neighbors
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
    # Hold entries for 10 minutes
    sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    # igmp
    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
    sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)


def setup_node_tmpdir(logdir, name):
    # Cleanup old log, valgrind, and core files.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name), shell=True
    )

    # Setup the per node directory.
    nodelogdir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
    )
    logfile = "{0}/{1}.log".format(logdir, name)
    return logfile


class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled"

    def __init__(self, name, **params):

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using the old API and doesn't have logdir
        # specified, then attempt to generate a unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.unified_config = None
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "pim6d": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get("frrdir")
        if self.daemondir is None:
            self.daemondir = self.config_defaults.get("topogen", "frrdir")

        zebra_path = os.path.join(self.daemondir, "zebra")
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get("daemondir")
        if self.daemondir is None:
            self.routertype = params.get(
                "routertype", self.config_defaults.get("topogen", "routertype")
            )
            self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, "zebra")
            if not os.path.isfile(zpath):
                raise Exception("No zebra binary found in {}".format(zpath))
            # Allow the user to specify routertype when the path was specified.
            if params.get("routertype") is not None:
                self.routertype = params.get("routertype")

        # Set ownership of config files
        self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))

    def terminate(self):
        # Stop running FRR daemons
        self.stopRouter()
        super(Router, self).terminate()
        os.system("chmod -R go+rw " + self.logdir)

    # Return a list of (name, pid) tuples for the running daemons
    def listDaemons(self):
        ret = []
        rc, stdout, _ = self.cmd_status(
            "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
        )
        if rc:
            return ret
        for d in stdout.strip().split("\n"):
            pidfile = d.strip()
            try:
                pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
                name = os.path.basename(pidfile[:-4])

                # probably not compatible with bsd.
                rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
                if rc:
                    logger.warning(
                        "%s: %s exited leaving pidfile %s (%s)",
                        self.name,
                        name,
                        pidfile,
                        pid,
                    )
                    self.cmd("rm -- " + pidfile)
                else:
                    ret.append((name, pid))
            except (subprocess.CalledProcessError, ValueError):
                pass
        return ret

    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        # Stop running FRR daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            assert "Errors found - details follow:" == 0, errors
        return errors

    def removeIPs(self):
        for interface in self.intfNames():
            try:
                self.intf_ip_cmd(interface, "ip address flush " + interface)
            except Exception as ex:
                logger.error("%s can't remove IPs %s", self, str(ex))
                # pdb.set_trace()
                # assert False, "can't remove IPs %s" % str(ex)

    def checkCapability(self, daemon, param):
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace("-", "")
            output = self.cmd(
                "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
            )
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        """Enable and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after the router.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.info(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            if daemon == "frr":
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is a private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-started staticd has no config, so it will read from zebra's config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons

    def runInWindow(self, cmd, title=None):
        return self.run_in_window(cmd, title)

    def startRouter(self, tgen=None):
        if self.unified_config:
            self.cmd(
                'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )
        else:
            # Disable integrated-vtysh-config
            self.cmd(
                'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )

        self.cmd(
            "chown %s:%svty /etc/%s/vtysh.conf"
            % (self.routertype, self.routertype, self.routertype)
        )
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        # (list() forces evaluation; a bare map() is lazy on Python 3)
        list(map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))))
        # Remove old core files
        list(map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check that LDP is compiled and the Linux kernel is 4.5 or higher.
        # No error - but return a message and skip all the tests.
        if self.daemons["ldpd"] == 1:
            ldpd_path = os.path.join(self.daemondir, "ldpd")
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), "4.5") < 0:
                logger.info("LDP Test needs Linux kernel 4.5 minimum")
                return "LDP Test needs Linux kernel 4.5 minimum"
            # Check if we have mpls
            if tgen is not None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info(
                        "LDP/MPLS Tests will be skipped, platform missing module(s)"
                    )
            else:
                # Test for MPLS kernel modules available
                self.hasmpls = False
                if not module_present("mpls-router"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-router kernel module)"
                    )
                elif not module_present("mpls-iptunnel"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-iptunnel kernel module)"
                    )
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"

            # Really want to use sysctl_atleast here, but only when MPLS is actually being
            # used
            self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

        shell_routers = g_extra_config["shell"]
        if "all" in shell_routers or self.name in shell_routers:
            self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)

        if self.daemons["eigrpd"] == 1:
            eigrpd_path = os.path.join(self.daemondir, "eigrpd")
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons["bfdd"] == 1:
            bfdd_path = os.path.join(self.daemondir, "bfdd")
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        status = self.startRouterDaemons(tgen=tgen)

        vtysh_routers = g_extra_config["vtysh"]
        if "all" in vtysh_routers or self.name in vtysh_routers:
            self.run_in_window("vtysh", title="vt-%s" % self.name)

        if self.unified_config:
            self.cmd("vtysh -f /etc/frr/frr.conf")

        return status

    def getStdErr(self, daemon):
        return self.getLog("err", daemon)

    def getStdOut(self, daemon):
        return self.getLog("out", daemon)

    def getLog(self, log, daemon):
        return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))

    def startRouterDaemons(self, daemons=None, tgen=None):
        "Starts FRR daemons for this router."

        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy the global value if it was covered by a namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        if self.version is None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        def start_daemon(daemon, extra_opts=None):
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
            else:
                binary = os.path.join(self.daemondir, daemon)

                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    cmdenv += "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
                    daemon_opts, daemon
                )
            if extra_opts:
                cmdopt += " " + extra_opts

            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            else:
                if daemon != "snmpd":
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start Zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zebra a chance to configure interface addresses that the snmpd
            # daemon may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        if daemons is None:
            # Fix Link-Local Addresses on initial startup
            # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
            _, output, _ = self.cmd_status(
                "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
                stderr=subprocess.STDOUT,
            )
            logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        return ""

1884 def killRouterDaemons(
1885 self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
1886 ):
1887 # Kill the running FRR daemons (only those the caller
1888 # specified) using SIGKILL
1889 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1890 errors = ""
1891 daemonsNotRunning = []
1892 if re.search(r"No such file or directory", rundaemons):
1893 return errors
1894 for daemon in daemons:
1895 if rundaemons is not None and daemon in rundaemons:
1896 numRunning = 0
1897 dmns = rundaemons.split("\n")
1898 # Exclude empty string at end of list
1899 for d in dmns[:-1]:
1900 if re.search(r"%s" % daemon, d):
1901 daemonpidfile = d.rstrip()
1902 daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip()
1903 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
1904 logger.info(
1905 "{}: killing {}".format(
1906 self.name,
1907 os.path.basename(daemonpidfile.rsplit(".", 1)[0]),
1908 )
1909 )
1910 os.kill(int(daemonpid), signal.SIGKILL)
1911 if pid_exists(int(daemonpid)):
1912 numRunning += 1
1913 while wait and numRunning > 0:
1914 sleep(
1915 2,
1916 "{}: waiting for {} daemon to be stopped".format(
1917 self.name, daemon
1918 ),
1919 )
1920
1921 # 2nd round of kill if daemons didn't exit
1922 for d in dmns[:-1]:
1923 if re.search(r"%s" % daemon, d):
1924 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1925 if daemonpid.isdigit() and pid_exists(
1926 int(daemonpid)
1927 ):
1928 logger.info(
1929 "{}: killing {}".format(
1930 self.name,
1931 os.path.basename(
1932 d.rstrip().rsplit(".", 1)[0]
1933 ),
1934 )
1935 )
1936 os.kill(int(daemonpid), signal.SIGKILL)
1937 if daemonpid.isdigit() and not pid_exists(
1938 int(daemonpid)
1939 ):
1940 numRunning -= 1
1941 self.cmd("rm -- {}".format(daemonpidfile))
1942 if wait:
1943 errors = self.checkRouterCores(reportOnce=True)
1944 if self.checkRouterVersion("<", minErrorVersion):
1945 # ignore errors in old versions
1946 errors = ""
1947 if assertOnError and len(errors) > 0:
1948 assert "Errors found - details follow:" == 0, errors
1949 else:
1950 daemonsNotRunning.append(daemon)
1951 if len(daemonsNotRunning) > 0:
1952 errors = errors + "Daemons are not running: {}".format(daemonsNotRunning)
1953
1954 return errors
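# Usage sketch (router fixture and daemon names are hypothetical): kill only
# bgpd and ospfd, waiting for their pids to disappear and failing on cores:
#
#     errors = router.killRouterDaemons(["bgpd", "ospfd"], wait=True)
#     assert errors == "", errors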
1955
1956 def checkRouterCores(self, reportLeaks=True, reportOnce=False):
1957 if reportOnce and not self.reportCores:
1958 return
1959 reportMade = False
1960 traces = ""
1961 for daemon in self.daemons:
1962 if self.daemons[daemon] == 1:
1963 # Look for core file
1964 corefiles = glob.glob(
1965 "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
1966 )
1967 if len(corefiles) > 0:
1968 backtrace = gdb_core(self, daemon, corefiles)
1969 traces = (
1970 traces
1971 + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
1972 % (self.name, daemon, backtrace)
1973 )
1974 reportMade = True
1975 elif reportLeaks:
1976 log = self.getStdErr(daemon)
1977 if "memstats" in log:
1978 sys.stderr.write(
1979 "%s: %s has memory leaks:\n" % (self.name, daemon)
1980 )
1981 traces = traces + "\n%s: %s has memory leaks:\n" % (
1982 self.name,
1983 daemon,
1984 )
1985 log = re.sub("core_handler: ", "", log)
1986 log = re.sub(
1987 r"(showing active allocations in memory group [a-zA-Z0-9]+)",
1988 r"\n ## \1",
1989 log,
1990 )
1991 log = re.sub("memstats: ", " ", log)
1992 sys.stderr.write(log)
1993 reportMade = True
1994 # Look for AddressSanitizer errors and append to /tmp/AddressSanitizer.txt if found
1995 if checkAddressSanitizerError(
1996 self.getStdErr(daemon), self.name, daemon, self.logdir
1997 ):
1998 sys.stderr.write(
1999 "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
2000 )
2001 traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
2002 self.name,
2003 daemon,
2004 )
2005 reportMade = True
2006 if reportMade:
2007 self.reportCores = False
2008 return traces
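# Illustrative sketch of the log rewriting above, using a made-up stderr
# line (real daemon output may differ):
#
#     import re
#     log = "core_handler: showing active allocations in memory group libfrr"
#     log = re.sub("core_handler: ", "", log)
#     log = re.sub(
#         r"(showing active allocations in memory group [a-zA-Z0-9]+)",
#         r"\n ## \1",
#         log,
#     )
#     # log is now "\n ## showing active allocations in memory group libfrr"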
2009
2010 def checkRouterRunning(self):
2011 "Check if router daemons are running and collect crashinfo they don't run"
2012
2013 global fatal_error
2014
2015 daemonsRunning = self.cmd(
2016 'vtysh -c "show logging" | grep "Logging configuration for"'
2017 )
2018 # Look for AddressSanitizer errors in vtysh output and append to /tmp/AddressSanitizer.txt if found
2019 if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
2020 return "%s: vtysh killed by AddressSanitizer" % (self.name)
2021
2022 for daemon in self.daemons:
2023 if daemon == "snmpd":
2024 continue
2025 if (self.daemons[daemon] == 1) and (daemon not in daemonsRunning):
2026 sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
2027 if daemon == "staticd":
2028 sys.stderr.write(
2029 "You may have a copy of staticd installed but are attempting to test against\n"
2030 )
2031 sys.stderr.write(
2032 "a version of FRR that does not have staticd, please cleanup the install dir\n"
2033 )
2034
2035 # Look for core file
2036 corefiles = glob.glob(
2037 "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
2038 )
2039 if len(corefiles) > 0:
2040 gdb_core(self, daemon, corefiles)
2041 else:
2042 # No core found - if a matching logfile exists in the log directory, print its last 20 lines.
2043 if os.path.isfile(
2044 "{}/{}/{}.log".format(self.logdir, self.name, daemon)
2045 ):
2046 log_tail = subprocess.check_output(
2047 "tail -n20 {}/{}/{}.log 2> /dev/null".format(
2048 self.logdir, self.name, daemon
2049 ),
2050 shell=True,
2051 )
2054 sys.stderr.write(
2055 "\nFrom %s %s %s log file:\n"
2056 % (self.routertype, self.name, daemon)
2057 )
2058 sys.stderr.write("%s\n" % log_tail)
2059
2060 # Look for AddressSanitizer errors and append to /tmp/AddressSanitizer.txt if found
2061 if checkAddressSanitizerError(
2062 self.getStdErr(daemon), self.name, daemon, self.logdir
2063 ):
2064 return "%s: Daemon %s not running - killed by AddressSanitizer" % (
2065 self.name,
2066 daemon,
2067 )
2068
2069 return "%s: Daemon %s not running" % (self.name, daemon)
2070 return ""
2071
2072 def checkRouterVersion(self, cmpop, version):
2073 """
2074 Compares router version using operation `cmpop` with `version`.
2075 Valid `cmpop` values:
2076 * `>=`: has the same version or greater
2077 * `>`: has a greater version
2078 * `=`: has the same version
2079 * `<`: has a lesser version
2080 * `<=`: has the same version or lesser
2081
2082 Usage example: router.checkRouterVersion('>', '1.0')
2083 """
2084
2085 # Make sure we have version information first
2086 if self.version is None:
2087 self.version = self.cmd(
2088 os.path.join(self.daemondir, "bgpd") + " -v"
2089 ).split()[2]
2090 logger.info("{}: running version: {}".format(self.name, self.version))
2091
2092 rversion = self.version
2093 if rversion is None:
2094 return False
2095
2096 result = version_cmp(rversion, version)
2097 if cmpop == ">=":
2098 return result >= 0
2099 if cmpop == ">":
2100 return result > 0
2101 if cmpop == "=":
2102 return result == 0
2103 if cmpop == "<":
2104 return result < 0
2105 if cmpop == "<":
2106 return result < 0
2107 if cmpop == "<=":
2108 return result <= 0
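# Usage sketch (version string is hypothetical): with self.version == "8.4.1",
#
#     router.checkRouterVersion(">=", "8.0")  # True
#     router.checkRouterVersion("<", "8.4")   # False
#
# The comparisons rely on version_cmp() returning <0, 0 or >0, strcmp-style.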
2109
2110 def get_ipv6_linklocal(self):
2111 "Get LinkLocal Addresses from interfaces"
2112
2113 linklocal = []
2114
2115 ifaces = self.cmd("ip -6 address")
2116 # Fix newlines (make them all the same)
2117 ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
2118 interface = ""
2119 ll_per_if_count = 0
2120 for line in ifaces:
2121 m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
2122 if m:
2123 interface = m.group(1)
2124 ll_per_if_count = 0
2125 m = re.search(
2126 "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
2127 line,
2128 )
2129 if m:
2130 local = m.group(1)
2131 ll_per_if_count += 1
2132 if ll_per_if_count > 1:
2133 linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
2134 else:
2135 linklocal += [[interface, local]]
2136 return linklocal
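# Return-shape sketch (addresses are hypothetical): one [ifname, address]
# pair per scope-link address; extra addresses on the same interface get a
# "-<n>" suffix appended to the interface name:
#
#     [["r1-eth0", "fe80::a8bb:ccff:fe00:100"],
#      ["r1-eth0-2", "fe80::a8bb:ccff:fe00:200"]]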
2137
2138 def daemon_available(self, daemon):
2139 "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
2140
2141 daemon_path = os.path.join(self.daemondir, daemon)
2142 if not os.path.isfile(daemon_path):
2143 return False
2144 if daemon == "ldpd":
2145 if version_cmp(platform.release(), "4.5") < 0:
2146 return False
2147 if not module_present("mpls-router", load=False):
2148 return False
2149 if not module_present("mpls-iptunnel", load=False):
2150 return False
2151 return True
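# Usage sketch: skip an LDP topotest when the host cannot run ldpd (the
# pytest call is illustrative, assuming the caller imported pytest):
#
#     if not router.daemon_available("ldpd"):
#         pytest.skip("ldpd missing or kernel lacks MPLS modules")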
2152
2153 def get_routertype(self):
2154 "Return the type of Router (frr)"
2155
2156 return self.routertype
2157
2158 def report_memory_leaks(self, filename_prefix, testscript):
2159 "Report Memory Leaks to file prefixed with given string"
2160
2161 leakfound = False
2162 filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
2163 for daemon in self.daemons:
2164 if self.daemons[daemon] == 1:
2165 log = self.getStdErr(daemon)
2166 if "memstats" in log:
2167 # Found memory leak
2168 logger.info(
2169 "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
2170 )
2171 if not leakfound:
2172 leakfound = True
2173 # Check if file already exists
2174 fileexists = os.path.isfile(filename)
2175 leakfile = open(filename, "a")
2176 if not fileexists:
2177 # New file - add header
2178 leakfile.write(
2179 "# Memory Leak Detection for topotest %s\n\n"
2180 % testscript
2181 )
2182 leakfile.write("## Router %s\n" % self.name)
2183 leakfile.write("### Process %s\n" % daemon)
2184 log = re.sub("core_handler: ", "", log)
2185 log = re.sub(
2186 r"(showing active allocations in memory group [a-zA-Z0-9]+)",
2187 r"\n#### \1\n",
2188 log,
2189 )
2190 log = re.sub("memstats: ", " ", log)
2191 leakfile.write(log)
2192 leakfile.write("\n")
2193 if leakfound:
2194 leakfile.close()
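# Sketch of the report layout written above (file name and content are
# hypothetical):
#
#     # Memory Leak Detection for topotest test_example.py
#
#     ## Router r1
#     ### Process bgpd
#
#     #### showing active allocations in memory group libfrr
#      ...per-allocation lines from the daemon's memstats output...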
2195
2196
2197 def frr_unicode(s):
2198 """Convert string to unicode, depending on python version"""
2199 if sys.version_info[0] > 2:
2200 return s
2201 else:
2202 return unicode(s) # pylint: disable=E0602
2203
2204
2205 def is_mapping(o):
2206 return isinstance(o, Mapping)
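# Quick sketch: is_mapping({"k": "v"}) is True, while is_mapping(["k", "v"])
# is False; any collections.abc.Mapping implementation (e.g. dict) qualifies.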