1 #!/usr/bin/env python
2
3 #
4 # topotest.py
5 # Library of helper functions for NetDEF Topology Tests
6 #
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 import difflib
26 import errno
27 import functools
28 import glob
29 import json
30 import os
31 import pdb
32 import platform
33 import re
34 import resource
35 import signal
36 import subprocess
37 import sys
38 import tempfile
39 import time
40 from copy import deepcopy
41
42 import lib.topolog as topolog
43 from lib.topolog import logger
44
45 if sys.version_info[0] > 2:
46 import configparser
47 from collections.abc import Mapping
48 else:
49 import ConfigParser as configparser
50 from collections import Mapping
51
52 from lib import micronet
53 from lib.micronet_compat import Node
54
55 g_extra_config = {}
56
57
58 def get_logs_path(rundir):
59 logspath = topolog.get_test_logdir()
60 return os.path.join(rundir, logspath)
61
62
63 def gdb_core(obj, daemon, corefiles):
64 gdbcmds = """
65 info threads
66 bt full
67 disassemble
68 up
69 disassemble
70 up
71 disassemble
72 up
73 disassemble
74 up
75 disassemble
76 up
77 disassemble
78 """
79 gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")]
80 gdbcmds = [item for sl in gdbcmds for item in sl]
81
82 daemon_path = os.path.join(obj.daemondir, daemon)
83 backtrace = subprocess.check_output(
84 ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds
85 )
86 sys.stderr.write(
87 "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
88 )
89 sys.stderr.write("%s" % backtrace)
90 return backtrace
91
92
93 class json_cmp_result(object):
94 "json_cmp result class for better assertion messages"
95
96 def __init__(self):
97 self.errors = []
98
99 def add_error(self, error):
100 "Append error message to the result"
101 for line in error.splitlines():
102 self.errors.append(line)
103
104 def has_errors(self):
105 "Returns True if there were errors, otherwise False."
106 return len(self.errors) > 0
107
108 def gen_report(self):
109 headline = ["Generated JSON diff error report:", ""]
110 return headline + self.errors
111
112 def __str__(self):
113 return (
114 "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
115 )
116
117
118 def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
119 """
120     Internal workhorse that compares two JSON data structures and generates a human-readable error report.
121 """
122
123 def dump_json(v):
124 if isinstance(v, (dict, list)):
125 return "\t" + "\t".join(
126 json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
127 )
128 else:
129 return "'{}'".format(v)
130
131 def json_type(v):
132 if isinstance(v, (list, tuple)):
133 return "Array"
134 elif isinstance(v, dict):
135 return "Object"
136         elif isinstance(v, bool):
137             return "Boolean"
138         elif isinstance(v, (int, float)):
139             return "Number"
140         elif isinstance(v, str):
141             return "String"
142         elif v is None:
143             return "null"
144
145 def get_errors(other_acc):
146 return other_acc[1]
147
148 def get_errors_n(other_acc):
149 return other_acc[0]
150
151 def add_error(acc, msg, points=1):
152 return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))
153
154 def merge_errors(acc, other_acc):
155 return (acc[0] + other_acc[0], acc[1] + other_acc[1])
156
157 def add_idx(idx):
158 return "{}[{}]".format(path, idx)
159
160 def add_key(key):
161 return "{}->{}".format(path, key)
162
163 def has_errors(other_acc):
164 return other_acc[0] > 0
165
166 if d2 == "*" or (
167 not isinstance(d1, (list, dict))
168 and not isinstance(d2, (list, dict))
169 and d1 == d2
170 ):
171 return acc
172 elif (
173 not isinstance(d1, (list, dict))
174 and not isinstance(d2, (list, dict))
175 and d1 != d2
176 ):
177 acc = add_error(
178 acc,
179 "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
180 )
181 elif (
182 isinstance(d1, list)
183 and isinstance(d2, list)
184 and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
185 ):
186 if not exact:
187 del d2[0]
188 if len(d1) != len(d2):
189 acc = add_error(
190 acc,
191 "d1 has Array of length {} but in d2 it is of length {}".format(
192 len(d1), len(d2)
193 ),
194 )
195 else:
196 for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
197 acc = merge_errors(
198 acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
199 )
200 elif isinstance(d1, list) and isinstance(d2, list):
201 if len(d1) < len(d2):
202 acc = add_error(
203 acc,
204 "d1 has Array of length {} but in d2 it is of length {}".format(
205 len(d1), len(d2)
206 ),
207 )
208 else:
209 for idx2, v2 in zip(range(0, len(d2)), d2):
210 found_match = False
211 closest_diff = None
212 closest_idx = None
213 for idx1, v1 in zip(range(0, len(d1)), d1):
214 tmp_v1 = deepcopy(v1)
215 tmp_v2 = deepcopy(v2)
216 tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
217 if not has_errors(tmp_diff):
218 found_match = True
219 del d1[idx1]
220 break
221 elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
222 closest_diff
223 ):
224 closest_diff = tmp_diff
225 closest_idx = idx1
226 if not found_match and isinstance(v2, (list, dict)):
227 sub_error = "\n\n\t{}".format(
228 "\t".join(get_errors(closest_diff).splitlines(True))
229 )
230 acc = add_error(
231 acc,
232 (
233 "d2 has the following element at index {} which is not present in d1: "
234 + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
235 ).format(idx2, dump_json(v2), closest_idx, sub_error),
236 )
237 if not found_match and not isinstance(v2, (list, dict)):
238 acc = add_error(
239 acc,
240 "d2 has the following element at index {} which is not present in d1: {}".format(
241 idx2, dump_json(v2)
242 ),
243 )
244 elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
245 invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
246 invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
247 for k in invalid_keys_d1:
248 acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
249 for k in invalid_keys_d2:
250 acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
251 valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
252 for k in valid_keys_intersection:
253 acc = merge_errors(
254 acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
255 )
256 elif isinstance(d1, dict) and isinstance(d2, dict):
257         none_keys = [k for k, v in d2.items() if v is None]
258 none_keys_present = [k for k in d1.keys() if k in none_keys]
259 for k in none_keys_present:
260 acc = add_error(
261 acc, "d1 has key '{}' which is not supposed to be present".format(k)
262 )
263         keys = [k for k, v in d2.items() if v is not None]
264 invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
265 for k in invalid_keys_intersection:
266 acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
267 valid_keys_intersection = [k for k in keys if k in d1.keys()]
268 for k in valid_keys_intersection:
269 acc = merge_errors(
270 acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
271 )
272 else:
273 acc = add_error(
274 acc,
275 "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
276 json_type(d1), json_type(d2)
277 ),
278 points=2,
279 )
280
281 return acc
282
283
284 def json_cmp(d1, d2, exact=False):
285 """
286 JSON compare function. Receives two parameters:
287 * `d1`: parsed JSON data structure
288 * `d2`: parsed JSON data structure
289
290 Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
291 in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
292 error report is generated and wrapped in a 'json_cmp_result()'. There are special
293 parameters and notations explained below which can be used to cover rather unusual
294 cases:
295
296     * when 'exact' is set to 'True' then d1 and d2 are tested for equality (including
297 order within JSON Arrays)
298 * using 'null' (or 'None' in Python) as JSON Object value is checking for key
299 absence in d1
300 * using '*' as JSON Object value or Array value is checking for presence in d1
301 without checking the values
302 * using '__ordered__' as first element in a JSON Array in d2 will also check the
303 order when it is compared to an Array in d1
304 """
305
306 (errors_n, errors) = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)
307
308 if errors_n > 0:
309 result = json_cmp_result()
310 result.add_error(errors)
311 return result
312 else:
313 return None
314
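# A minimal usage sketch for json_cmp() (hypothetical data, not part of the
# library API): demonstrates the "subset" semantics plus the '*' wildcard and
# 'None' key-absence notations described in the docstring above.
def _json_cmp_example():
    got = {
        "routes": [{"prefix": "10.0.1.0/24", "metric": 20}],
        "count": 1,
    }
    want = {
        # 'metric' may hold any value; '*' only checks for presence.
        "routes": [{"prefix": "10.0.1.0/24", "metric": "*"}],
        # 'None' asserts that 'bogusKey' is absent from `got`.
        "bogusKey": None,
    }
    assert json_cmp(got, want) is None  # matches: no error report generated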
315
316 def router_output_cmp(router, cmd, expected):
317 """
318 Runs `cmd` in router and compares the output with `expected`.
319 """
320 return difflines(
321 normalize_text(router.vtysh_cmd(cmd)),
322 normalize_text(expected),
323 title1="Current output",
324 title2="Expected output",
325 )
326
327
328 def router_json_cmp(router, cmd, data, exact=False):
329 """
330 Runs `cmd` that returns JSON data (normally the command ends with 'json')
331 and compare with `data` contents.
332 """
333 return json_cmp(router.vtysh_cmd(cmd, isjson=True), data, exact)
334
335
336 def run_and_expect(func, what, count=20, wait=3):
337 """
338     Run `func` and compare the result with `what`. Retry up to `count` times,
339     waiting `wait` seconds between tries. By default it tries 20 times with
340     a 3-second delay between tries.
341
342 Returns (True, func-return) on success or
343 (False, func-return) on failure.
344
345 ---
346
347 Helper functions to use with this function:
348 - router_output_cmp
349 - router_json_cmp
350 """
351 start_time = time.time()
352 func_name = "<unknown>"
353 if func.__class__ == functools.partial:
354 func_name = func.func.__name__
355 else:
356 func_name = func.__name__
357
358 # Just a safety-check to avoid running topotests with very
359 # small wait/count arguments.
360 wait_time = wait * count
361 if wait_time < 5:
362 assert (
363 wait_time >= 5
364 ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
365 count, wait
366 )
367
368 logger.info(
369 "'{}' polling started (interval {} secs, maximum {} tries)".format(
370 func_name, wait, count
371 )
372 )
373
374 while count > 0:
375 result = func()
376 if result != what:
377 time.sleep(wait)
378 count -= 1
379 continue
380
381 end_time = time.time()
382 logger.info(
383 "'{}' succeeded after {:.2f} seconds".format(
384 func_name, end_time - start_time
385 )
386 )
387 return (True, result)
388
389 end_time = time.time()
390 logger.error(
391 "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
392 )
393 return (False, result)
394
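# A minimal sketch (hypothetical `router` fixture and route data) showing the
# intended pairing of run_and_expect() with the helpers above via
# functools.partial: poll until the router's JSON output matches.
def _run_and_expect_example(router):
    expected = {"1.1.1.1": [{"protocol": "ospf"}]}  # hypothetical route data
    test_func = functools.partial(
        router_json_cmp, router, "show ip route json", expected
    )
    # Poll up to 10 times, 2 seconds apart; success when json_cmp returns None.
    ok, result = run_and_expect(test_func, None, count=10, wait=2)
    assert ok, "routes did not converge:\n{}".format(result)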
395
396 def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
397 """
398     Run `func` and compare the result's type with `etype`. Retry up to `count`
399     times, waiting `wait` seconds between tries. By default it tries 20 times
400     with a 3-second delay between tries.
401
402 This function is used when you want to test the return type and,
403 optionally, the return value.
404
405 Returns (True, func-return) on success or
406 (False, func-return) on failure.
407 """
408 start_time = time.time()
409 func_name = "<unknown>"
410 if func.__class__ == functools.partial:
411 func_name = func.func.__name__
412 else:
413 func_name = func.__name__
414
415 # Just a safety-check to avoid running topotests with very
416 # small wait/count arguments.
417 wait_time = wait * count
418 if wait_time < 5:
419 assert (
420 wait_time >= 5
421 ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
422 count, wait
423 )
424
425 logger.info(
426 "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
427 func_name, wait, int(wait * count)
428 )
429 )
430
431 while count > 0:
432 result = func()
433 if not isinstance(result, etype):
434 logger.debug(
435 "Expected result type '{}' got '{}' instead".format(etype, type(result))
436 )
437 time.sleep(wait)
438 count -= 1
439 continue
440
441         if etype is not type(None) and avalue is not None and result != avalue:
442 logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
443 time.sleep(wait)
444 count -= 1
445 continue
446
447 end_time = time.time()
448 logger.info(
449 "'{}' succeeded after {:.2f} seconds".format(
450 func_name, end_time - start_time
451 )
452 )
453 return (True, result)
454
455 end_time = time.time()
456 logger.error(
457 "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
458 )
459 return (False, result)
460
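# A short sketch of run_and_expect_type() (hypothetical `router` fixture):
# waits for `func` to return the expected type, and optionally an exact value
# via `avalue`, instead of comparing against one fixed result.
def _run_and_expect_type_example(router):
    def get_version():
        return router.vtysh_cmd("show version")  # returns a str when ready

    # count=5, wait=1 keeps the total wait at the 5-second safety minimum.
    ok, output = run_and_expect_type(get_version, str, count=5, wait=1)
    assert ok, "no string output received"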
461
462 def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
463 """
464 Runs `cmd` that returns JSON data (normally the command ends with 'json')
465 and compare with `data` contents. Retry by default for 10 seconds
466 """
467
468 def test_func():
469 return router_json_cmp(router, cmd, data, exact)
470
471 ok, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
472 return ok
473
474
475 def int2dpid(dpid):
476 "Converting Integer to DPID"
477
478 try:
479 dpid = hex(dpid)[2:]
480 dpid = "0" * (16 - len(dpid)) + dpid
481 return dpid
482 except IndexError:
483 raise Exception(
484 "Unable to derive default datapath ID - "
485 "please either specify a dpid or use a "
486 "canonical switch name such as s23."
487 )
488
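# Example of the zero-padded conversion performed above (illustrative only):
# dpid 35 -> hex "23" -> left-padded to 16 hex digits.
def _int2dpid_example():
    assert int2dpid(35) == "0000000000000023"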
489
490 def pid_exists(pid):
491 "Check whether pid exists in the current process table."
492
493 if pid <= 0:
494 return False
495 try:
496 os.waitpid(pid, os.WNOHANG)
497         except OSError:
498             pass
499 try:
500 os.kill(pid, 0)
501 except OSError as err:
502 if err.errno == errno.ESRCH:
503 # ESRCH == No such process
504 return False
505 elif err.errno == errno.EPERM:
506 # EPERM clearly means there's a process to deny access to
507 return True
508 else:
509 # According to "man 2 kill" possible error values are
510 # (EINVAL, EPERM, ESRCH)
511 raise
512 else:
513 return True
514
515
516 def get_textdiff(text1, text2, title1="", title2="", **opts):
517 "Returns empty string if same or formatted diff"
518
519 diff = "\n".join(
520 difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
521 )
522 # Clean up line endings
523 diff = os.linesep.join([s for s in diff.splitlines() if s])
524 return diff
525
526
527 def difflines(text1, text2, title1="", title2="", **opts):
528 "Wrapper for get_textdiff to avoid string transformations."
529     text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(True)
530     text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(True)
531 return get_textdiff(text1, text2, title1, title2, **opts)
532
533
534 def get_file(content):
535 """
536 Generates a temporary file in '/tmp' with `content` and returns the file name.
537 """
538 if isinstance(content, list) or isinstance(content, tuple):
539 content = "\n".join(content)
540 fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
541 fname = fde.name
542 fde.write(content)
543 fde.close()
544 return fname
545
546
547 def normalize_text(text):
548 """
549     Strips formatting spaces/tabs, carriage returns, and trailing whitespace.
550 """
551 text = re.sub(r"[ \t]+", " ", text)
552 text = re.sub(r"\r", "", text)
553
554     # Remove trailing whitespace from each line.
555 text = re.sub(r"[ \t]+\n", "\n", text)
556 # Remove whitespace at the end of the text.
557 text = text.rstrip()
558
559 return text
560
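# A small illustration of normalize_text() (hypothetical input): runs of
# spaces/tabs collapse to one space, CRs are dropped, and per-line plus
# trailing whitespace is removed, easing output comparisons across runs.
def _normalize_text_example():
    raw = "up\t  fast \r\ndown  slow   \n"
    assert normalize_text(raw) == "up fast\ndown slow"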
561
562 def is_linux():
563 """
564     Parses uname output to check if running on GNU/Linux.
565
566 Returns True if running on Linux, returns False otherwise.
567 """
568
569 if os.uname()[0] == "Linux":
570 return True
571 return False
572
573
574 def iproute2_is_vrf_capable():
575 """
576 Checks if the iproute2 version installed on the system is capable of
577 handling VRFs by interpreting the output of the 'ip' utility found in PATH.
578
579 Returns True if capability can be detected, returns False otherwise.
580 """
581
582 if is_linux():
583 try:
584 subp = subprocess.Popen(
585 ["ip", "route", "show", "vrf"],
586 stdout=subprocess.PIPE,
587 stderr=subprocess.PIPE,
588 stdin=subprocess.PIPE,
589 )
590             iproute2_err = subp.communicate()[1].decode().splitlines()[0].split()[0]
591
592 if iproute2_err != "Error:":
593 return True
594 except Exception:
595 pass
596 return False
597
598
599 def module_present_linux(module, load):
600 """
601 Returns whether `module` is present.
602
603 If `load` is true, it will try to load it via modprobe.
604 """
605 with open("/proc/modules", "r") as modules_file:
606 if module.replace("-", "_") in modules_file.read():
607 return True
608 cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module)
609 if os.system(cmd) != 0:
610 return False
611 else:
612 return True
613
614
615 def module_present_freebsd(module, load):
616 return True
617
618
619 def module_present(module, load=True):
620 if sys.platform.startswith("linux"):
621 return module_present_linux(module, load)
622 elif sys.platform.startswith("freebsd"):
623 return module_present_freebsd(module, load)
624
625
626 def version_cmp(v1, v2):
627 """
628     Compares two version strings and returns:
629
630 * `-1`: if `v1` is less than `v2`
631 * `0`: if `v1` is equal to `v2`
632 * `1`: if `v1` is greater than `v2`
633
634     Raises `ValueError` if versions are not well formatted.
635 """
636 vregex = r"(?P<whole>\d+(\.(\d+))*)"
637 v1m = re.match(vregex, v1)
638 v2m = re.match(vregex, v2)
639 if v1m is None or v2m is None:
640         raise ValueError("got an invalid version string")
641
642 # Split values
643 v1g = v1m.group("whole").split(".")
644 v2g = v2m.group("whole").split(".")
645
646 # Get the longest version string
647 vnum = len(v1g)
648 if len(v2g) > vnum:
649 vnum = len(v2g)
650
651 # Reverse list because we are going to pop the tail
652 v1g.reverse()
653 v2g.reverse()
654 for _ in range(vnum):
655 try:
656 v1n = int(v1g.pop())
657 except IndexError:
658 while v2g:
659 v2n = int(v2g.pop())
660 if v2n > 0:
661 return -1
662 break
663
664 try:
665 v2n = int(v2g.pop())
666 except IndexError:
667 if v1n > 0:
668 return 1
669 while v1g:
670 v1n = int(v1g.pop())
671 if v1n > 0:
672 return 1
673 break
674
675 if v1n > v2n:
676 return 1
677 if v1n < v2n:
678 return -1
679 return 0
680
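# Worked examples for version_cmp() (illustrative only): comparison is numeric
# per dot-separated component, so "3.10" sorts after "3.9", and a missing
# component only matters when the longer version has a non-zero tail.
def _version_cmp_example():
    assert version_cmp("3.10", "3.9") == 1   # 10 > 9 numerically
    assert version_cmp("4.5", "4.5.0") == 0  # zero tail compares equal
    assert version_cmp("4.5", "4.5.1") == -1  # non-zero tail wins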
681
682 def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
683 if ifaceaction:
684 str_ifaceaction = "no shutdown"
685 else:
686 str_ifaceaction = "shutdown"
687     if vrf_name is None:
688 cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
689 ifacename, str_ifaceaction
690 )
691 else:
692 cmd = (
693 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
694 ifacename, vrf_name, str_ifaceaction
695 )
696 )
697 node.run(cmd)
698
699
700 def ip4_route_zebra(node, vrf_name=None):
701 """
702 Gets an output of 'show ip route' command. It can be used
703 with comparing the output to a reference
704 """
705     if vrf_name is None:
706 tmp = node.vtysh_cmd("show ip route")
707 else:
708 tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
709 output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)
710
711 lines = output.splitlines()
712 header_found = False
713 while lines and (not lines[0].strip() or not header_found):
714 if "o - offload failure" in lines[0]:
715 header_found = True
716 lines = lines[1:]
717 return "\n".join(lines)
718
719
720 def ip6_route_zebra(node, vrf_name=None):
721 """
722 Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
723 canonicalizes it by eliding link-locals.
724 """
725
726     if vrf_name is None:
727 tmp = node.vtysh_cmd("show ipv6 route")
728 else:
729 tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))
730
731 # Mask out timestamp
732 output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)
733
734 # Mask out the link-local addresses
735 output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)
736
737 lines = output.splitlines()
738 header_found = False
739 while lines and (not lines[0].strip() or not header_found):
740 if "o - offload failure" in lines[0]:
741 header_found = True
742 lines = lines[1:]
743
744 return "\n".join(lines)
745
746
747 def proto_name_to_number(protocol):
748 return {
749 "bgp": "186",
750 "isis": "187",
751 "ospf": "188",
752 "rip": "189",
753 "ripng": "190",
754 "nhrp": "191",
755 "eigrp": "192",
756 "ldp": "193",
757 "sharp": "194",
758 "pbr": "195",
759 "static": "196",
760 "ospf6": "197",
761 }.get(
762 protocol, protocol
763 ) # default return same as input
764
765
766 def ip4_route(node):
767 """
768 Gets a structured return of the command 'ip route'. It can be used in
769 conjunction with json_cmp() to provide accurate assert explanations.
770
771 Return example:
772 {
773 '10.0.1.0/24': {
774 'dev': 'eth0',
775 'via': '172.16.0.1',
776 'proto': '188',
777 },
778 '10.0.2.0/24': {
779 'dev': 'eth1',
780 'proto': 'kernel',
781 }
782 }
783 """
784 output = normalize_text(node.run("ip route")).splitlines()
785 result = {}
786 for line in output:
787 columns = line.split(" ")
788 route = result[columns[0]] = {}
789 prev = None
790 for column in columns:
791 if prev == "dev":
792 route["dev"] = column
793 if prev == "via":
794 route["via"] = column
795 if prev == "proto":
796 # translate protocol names back to numbers
797 route["proto"] = proto_name_to_number(column)
798 if prev == "metric":
799 route["metric"] = column
800 if prev == "scope":
801 route["scope"] = column
802 prev = column
803
804 return result
805
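# Sketch of pairing ip4_route() with json_cmp() as the docstring suggests
# (hypothetical node and addresses, not part of the library API):
def _ip4_route_check_example(node):
    expected = {"10.0.1.0/24": {"dev": "eth0", "proto": "188"}}
    assert json_cmp(ip4_route(node), expected) is None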
806
807 def ip4_vrf_route(node):
808 """
809 Gets a structured return of the command 'ip route show vrf {0}-cust1'.
810 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
811
812 Return example:
813 {
814 '10.0.1.0/24': {
815 'dev': 'eth0',
816 'via': '172.16.0.1',
817 'proto': '188',
818 },
819 '10.0.2.0/24': {
820 'dev': 'eth1',
821 'proto': 'kernel',
822 }
823 }
824 """
825 output = normalize_text(
826 node.run("ip route show vrf {0}-cust1".format(node.name))
827 ).splitlines()
828
829 result = {}
830 for line in output:
831 columns = line.split(" ")
832 route = result[columns[0]] = {}
833 prev = None
834 for column in columns:
835 if prev == "dev":
836 route["dev"] = column
837 if prev == "via":
838 route["via"] = column
839 if prev == "proto":
840 # translate protocol names back to numbers
841 route["proto"] = proto_name_to_number(column)
842 if prev == "metric":
843 route["metric"] = column
844 if prev == "scope":
845 route["scope"] = column
846 prev = column
847
848 return result
849
850
851 def ip6_route(node):
852 """
853 Gets a structured return of the command 'ip -6 route'. It can be used in
854 conjunction with json_cmp() to provide accurate assert explanations.
855
856 Return example:
857 {
858 '2001:db8:1::/64': {
859 'dev': 'eth0',
860 'proto': '188',
861 },
862 '2001:db8:2::/64': {
863 'dev': 'eth1',
864 'proto': 'kernel',
865 }
866 }
867 """
868 output = normalize_text(node.run("ip -6 route")).splitlines()
869 result = {}
870 for line in output:
871 columns = line.split(" ")
872 route = result[columns[0]] = {}
873 prev = None
874 for column in columns:
875 if prev == "dev":
876 route["dev"] = column
877 if prev == "via":
878 route["via"] = column
879 if prev == "proto":
880 # translate protocol names back to numbers
881 route["proto"] = proto_name_to_number(column)
882 if prev == "metric":
883 route["metric"] = column
884 if prev == "pref":
885 route["pref"] = column
886 prev = column
887
888 return result
889
890
891 def ip6_vrf_route(node):
892 """
893 Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
894 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
895
896 Return example:
897 {
898 '2001:db8:1::/64': {
899 'dev': 'eth0',
900 'proto': '188',
901 },
902 '2001:db8:2::/64': {
903 'dev': 'eth1',
904 'proto': 'kernel',
905 }
906 }
907 """
908 output = normalize_text(
909 node.run("ip -6 route show vrf {0}-cust1".format(node.name))
910 ).splitlines()
911 result = {}
912 for line in output:
913 columns = line.split(" ")
914 route = result[columns[0]] = {}
915 prev = None
916 for column in columns:
917 if prev == "dev":
918 route["dev"] = column
919 if prev == "via":
920 route["via"] = column
921 if prev == "proto":
922 # translate protocol names back to numbers
923 route["proto"] = proto_name_to_number(column)
924 if prev == "metric":
925 route["metric"] = column
926 if prev == "pref":
927 route["pref"] = column
928 prev = column
929
930 return result
931
932
933 def ip_rules(node):
934 """
935 Gets a structured return of the command 'ip rule'. It can be used in
936 conjunction with json_cmp() to provide accurate assert explanations.
937
938 Return example:
939 [
940 {
941 "pref": "0"
942 "from": "all"
943 },
944 {
945 "pref": "32766"
946 "from": "all"
947 },
948 {
949 "to": "3.4.5.0/24",
950 "iif": "r1-eth2",
951 "pref": "304",
952 "from": "1.2.0.0/16",
953 "proto": "zebra"
954 }
955 ]
956 """
957 output = normalize_text(node.run("ip rule")).splitlines()
958 result = []
959 for line in output:
960 columns = line.split(" ")
961
962 route = {}
963 # remove last character, since it is ':'
964 pref = columns[0][:-1]
965 route["pref"] = pref
966 prev = None
967 for column in columns:
968 if prev == "from":
969 route["from"] = column
970 if prev == "to":
971 route["to"] = column
972 if prev == "proto":
973 route["proto"] = column
974 if prev == "iif":
975 route["iif"] = column
976 if prev == "fwmark":
977 route["fwmark"] = column
978 prev = column
979
980 result.append(route)
981 return result
982
983
984 def sleep(amount, reason=None):
985 """
986 Sleep wrapper that registers in the log the amount of sleep
987 """
988 if reason is None:
989 logger.info("Sleeping for {} seconds".format(amount))
990 else:
991 logger.info(reason + " ({} seconds)".format(amount))
992
993 time.sleep(amount)
994
995
996 def checkAddressSanitizerError(output, router, component, logdir=""):
997 "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
998
999 def processAddressSanitizerError(asanErrorRe, output, router, component):
1000 sys.stderr.write(
1001 "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
1002 )
1003 # Sanitizer Error found in log
1004 pidMark = asanErrorRe.group(1)
1005 addressSanitizerLog = re.search(
1006 "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
1007 )
1008 if addressSanitizerLog:
1009 # Find Calling Test. Could be multiple steps back
1010             testframe = list(sys._current_frames().values())[0]
1011 level = 0
1012 while level < 10:
1013 test = os.path.splitext(
1014 os.path.basename(testframe.f_globals["__file__"])
1015 )[0]
1016 if (test != "topotest") and (test != "topogen"):
1017 # Found the calling test
1018 callingTest = os.path.basename(testframe.f_globals["__file__"])
1019 break
1020 level = level + 1
1021 testframe = testframe.f_back
1022 if level >= 10:
1023 # somehow couldn't find the test script.
1024 callingTest = "unknownTest"
1025 #
1026 # Now finding Calling Procedure
1027 level = 0
1028 while level < 20:
1029 callingProc = sys._getframe(level).f_code.co_name
1030 if (
1031 (callingProc != "processAddressSanitizerError")
1032 and (callingProc != "checkAddressSanitizerError")
1033 and (callingProc != "checkRouterCores")
1034 and (callingProc != "stopRouter")
1035 and (callingProc != "stop")
1036 and (callingProc != "stop_topology")
1037 and (callingProc != "checkRouterRunning")
1038 and (callingProc != "check_router_running")
1039 and (callingProc != "routers_have_failure")
1040 ):
1041 # Found the calling test
1042 break
1043 level = level + 1
1044 if level >= 20:
1045                 # something wrong - couldn't find the calling test function
1046 callingProc = "unknownProc"
1047 with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
1048 sys.stderr.write(
1049 "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1050 % (callingTest, callingProc, router)
1051 )
1052 sys.stderr.write(
1053 "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
1054 )
1055 addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
1056 addrSanFile.write(
1057 "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1058 % (callingTest, callingProc, router)
1059 )
1060 addrSanFile.write(
1061 " "
1062 + "\n ".join(addressSanitizerLog.group(1).splitlines())
1063 + "\n"
1064 )
1065 addrSanFile.write("\n---------------\n")
1066 return
1067
1068 addressSanitizerError = re.search(
1069 r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
1070 )
1071 if addressSanitizerError:
1072 processAddressSanitizerError(addressSanitizerError, output, router, component)
1073 return True
1074
1075 # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
1076 if logdir:
1077 filepattern = logdir + "/" + router + "/" + component + ".asan.*"
1078 logger.debug(
1079 "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
1080 )
1081 for file in glob.glob(filepattern):
1082 with open(file, "r") as asanErrorFile:
1083 asanError = asanErrorFile.read()
1084 addressSanitizerError = re.search(
1085 r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
1086 )
1087 if addressSanitizerError:
1088 processAddressSanitizerError(
1089 addressSanitizerError, asanError, router, component
1090 )
1091 return True
1092 return False
1093
1094
1095 def _sysctl_atleast(commander, variable, min_value):
1096 if isinstance(min_value, tuple):
1097 min_value = list(min_value)
1098 is_list = isinstance(min_value, list)
1099
1100 sval = commander.cmd_raises("sysctl -n " + variable).strip()
1101 if is_list:
1102 cur_val = [int(x) for x in sval.split()]
1103 else:
1104 cur_val = int(sval)
1105
1106 set_value = False
1107 if is_list:
1108 for i, v in enumerate(cur_val):
1109 if v < min_value[i]:
1110 set_value = True
1111 else:
1112 min_value[i] = v
1113 else:
1114 if cur_val < min_value:
1115 set_value = True
1116 if set_value:
1117 if is_list:
1118 valstr = " ".join([str(x) for x in min_value])
1119 else:
1120 valstr = str(min_value)
1121 logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
1122 commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1123
1124
1125 def _sysctl_assure(commander, variable, value):
1126 if isinstance(value, tuple):
1127 value = list(value)
1128 is_list = isinstance(value, list)
1129
1130 sval = commander.cmd_raises("sysctl -n " + variable).strip()
1131 if is_list:
1132 cur_val = [int(x) for x in sval.split()]
1133 else:
1134 cur_val = sval
1135
1136 set_value = False
1137 if is_list:
1138 for i, v in enumerate(cur_val):
1139 if v != value[i]:
1140 set_value = True
1141 else:
1142 value[i] = v
1143 else:
1144 if cur_val != str(value):
1145 set_value = True
1146
1147 if set_value:
1148 if is_list:
1149 valstr = " ".join([str(x) for x in value])
1150 else:
1151 valstr = str(value)
1152 logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
1153 commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1154
1155
1156 def sysctl_atleast(commander, variable, min_value, raises=False):
1157 try:
1158 if commander is None:
1159 commander = micronet.Commander("topotest")
1160 return _sysctl_atleast(commander, variable, min_value)
1161 except subprocess.CalledProcessError as error:
1162 logger.warning(
1163 "%s: Failed to assure sysctl min value %s = %s",
1164 commander,
1165 variable,
1166 min_value,
1167 )
1168 if raises:
1169 raise
1170
1171
1172 def sysctl_assure(commander, variable, value, raises=False):
1173 try:
1174 if commander is None:
1175 commander = micronet.Commander("topotest")
1176 return _sysctl_assure(commander, variable, value)
1177 except subprocess.CalledProcessError as error:
1178 logger.warning(
1179 "%s: Failed to assure sysctl value %s = %s",
1180 commander,
1181 variable,
1182 value,
1183 exc_info=True,
1184 )
1185 if raises:
1186 raise
1187
1188
1189 def rlimit_atleast(rname, min_value, raises=False):
1190 try:
1191 cval = resource.getrlimit(rname)
1192 soft, hard = cval
1193 if soft < min_value:
1194 nval = (min_value, hard if min_value < hard else min_value)
1195 logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
1196 resource.setrlimit(rname, nval)
1197 except subprocess.CalledProcessError as error:
1198 logger.warning(
1199 "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
1200 )
1201 if raises:
1202 raise
1203
1204
1205 def fix_netns_limits(ns):
1206
1207 # Maximum read and write socket buffer sizes
1208 sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
1209 sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
1210
1211 sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
1212 sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
1213 sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)
1214
1215 sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
1216 sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)
1217
1218 # XXX if things fail look here as this wasn't done previously
1219 sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
1220 sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)
1221
1222 # ARP
1223 sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
1224 sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
1225 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1226 sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
1227 sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
1228 sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
1229 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1230 sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)
1231
1232 sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)
1233
1234 # Keep ipv6 permanent addresses on an admin down
1235 sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
1236 if version_cmp(platform.release(), "4.20") >= 0:
1237 sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)
1238
1239 sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
1240 sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)
1241
1242 # igmp
1243 sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)
1244
1245 # Use neigh information on selection of nexthop for multipath hops
1246 sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
1247
1248
1249 def fix_host_limits():
1250 """Increase system limits."""
1251
1252 rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
1253 rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
1254 sysctl_atleast(None, "fs.file-max", 16 * 1024)
1255 sysctl_atleast(None, "kernel.pty.max", 16 * 1024)
1256
1257 # Enable coredumps
1258 # Original on ubuntu 17.x, but apport won't save as in namespace
1259 # |/usr/share/apport/apport %p %s %c %d %P
1260 sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
1261 sysctl_assure(None, "kernel.core_uses_pid", 1)
1262 sysctl_assure(None, "fs.suid_dumpable", 1)
1263
1264 # Maximum connection backlog
1265 sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
1266
1267 # Maximum read and write socket buffer sizes
1268 sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
1269 sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
1270
1271 # Garbage Collection Settings for ARP and Neighbors
1272 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
1273 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
1274 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
1275 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
1276 # Hold entries for 10 minutes
1277 sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1278 sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1279
1280 # igmp
1281 sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)
1282
1283 # MLD
1284 sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)
1285
1286 # Increase routing table size to 128K
1287 sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
1288 sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
1289
1290
1291 def setup_node_tmpdir(logdir, name):
1292 # Cleanup old log, valgrind, and core files.
1293 subprocess.check_call(
1294 "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name), shell=True
1295 )
1296
1297 # Setup the per node directory.
1298 nodelogdir = "{}/{}".format(logdir, name)
1299 subprocess.check_call(
1300 "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
1301 )
1302 logfile = "{0}/{1}.log".format(logdir, name)
1303 return logfile
1304
1305
1306 class Router(Node):
1307 "A Node with IPv4/IPv6 forwarding enabled"
1308
1309 def __init__(self, name, **params):
1310
1311 # Backward compatibility:
1312 # Load configuration defaults like topogen.
1313 self.config_defaults = configparser.ConfigParser(
1314 defaults={
1315 "verbosity": "info",
1316 "frrdir": "/usr/lib/frr",
1317 "routertype": "frr",
1318 "memleak_path": "",
1319 }
1320 )
1321
1322 self.config_defaults.read(
1323 os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
1324 )
1325
1326 # If this topology is using old API and doesn't have logdir
1327         # specified, then attempt to generate a unique logdir.
1328 self.logdir = params.get("logdir")
1329 if self.logdir is None:
1330 self.logdir = get_logs_path(g_extra_config["rundir"])
1331
1332 if not params.get("logger"):
1333 # If logger is present topogen has already set this up
1334 logfile = setup_node_tmpdir(self.logdir, name)
1335 l = topolog.get_logger(name, log_level="debug", target=logfile)
1336 params["logger"] = l
1337
1338 super(Router, self).__init__(name, **params)
1339
1340 self.daemondir = None
1341 self.hasmpls = False
1342 self.routertype = "frr"
1343 self.unified_config = None
1344 self.daemons = {
1345 "zebra": 0,
1346 "ripd": 0,
1347 "ripngd": 0,
1348 "ospfd": 0,
1349 "ospf6d": 0,
1350 "isisd": 0,
1351 "bgpd": 0,
1352 "pimd": 0,
1353 "pim6d": 0,
1354 "ldpd": 0,
1355 "eigrpd": 0,
1356 "nhrpd": 0,
1357 "staticd": 0,
1358 "bfdd": 0,
1359 "sharpd": 0,
1360 "babeld": 0,
1361 "pbrd": 0,
1362 "pathd": 0,
1363 "snmpd": 0,
1364 }
1365 self.daemons_options = {"zebra": ""}
1366 self.reportCores = True
1367 self.version = None
1368
1369 self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
1370 try:
1371 # Allow escaping from running inside docker
1372 cgroup = open("/proc/1/cgroup").read()
1373 m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
1374 if m:
1375 self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
1376 except IOError:
1377 pass
1378 else:
1379 logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
1380
1381 def _config_frr(self, **params):
1382 "Configure FRR binaries"
1383 self.daemondir = params.get("frrdir")
1384 if self.daemondir is None:
1385 self.daemondir = self.config_defaults.get("topogen", "frrdir")
1386
1387 zebra_path = os.path.join(self.daemondir, "zebra")
1388 if not os.path.isfile(zebra_path):
1389 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
1390
1391 # pylint: disable=W0221
1392 # Some params are only meaningful for the parent class.
1393 def config(self, **params):
1394 super(Router, self).config(**params)
1395
1396 # User did not specify the daemons directory, try to autodetect it.
1397 self.daemondir = params.get("daemondir")
1398 if self.daemondir is None:
1399 self.routertype = params.get(
1400 "routertype", self.config_defaults.get("topogen", "routertype")
1401 )
1402 self._config_frr(**params)
1403 else:
1404 # Test the provided path
1405 zpath = os.path.join(self.daemondir, "zebra")
1406 if not os.path.isfile(zpath):
1407 raise Exception("No zebra binary found in {}".format(zpath))
1408 # Allow user to specify routertype when the path was specified.
1409 if params.get("routertype") is not None:
1410 self.routertype = params.get("routertype")
1411
1412 # Set ownership of config files
1413 self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))
1414
1415 def terminate(self):
1416 # Stop running FRR daemons
1417 self.stopRouter()
1418 super(Router, self).terminate()
1419 os.system("chmod -R go+rw " + self.logdir)
1420
1421 # Return count of running daemons
1422 def listDaemons(self):
1423 ret = []
1424 rc, stdout, _ = self.cmd_status(
1425 "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
1426 )
1427 if rc:
1428 return ret
1429 for d in stdout.strip().split("\n"):
1430 pidfile = d.strip()
1431 try:
1432 pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
1433 name = os.path.basename(pidfile[:-4])
1434
1435 # probably not compatible with bsd.
1436 rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
1437 if rc:
1438 logger.warning(
1439 "%s: %s exited leaving pidfile %s (%s)",
1440 self.name,
1441 name,
1442 pidfile,
1443 pid,
1444 )
1445 self.cmd("rm -- " + pidfile)
1446 else:
1447 ret.append((name, pid))
1448 except (subprocess.CalledProcessError, ValueError):
1449 pass
1450 return ret
1451
1452 def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
1453 # Stop Running FRR Daemons
1454 running = self.listDaemons()
1455 if not running:
1456 return ""
1457
1458 logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
1459 for name, pid in running:
1460 logger.info("{}: sending SIGTERM to {}".format(self.name, name))
1461 try:
1462 os.kill(pid, signal.SIGTERM)
1463 except OSError as err:
1464 logger.info(
1465 "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
1466 )
1467
1468 running = self.listDaemons()
1469 if running:
1470 for _ in range(0, 30):
1471 sleep(
1472 0.5,
1473 "{}: waiting for daemons stopping: {}".format(
1474 self.name, ", ".join([x[0] for x in running])
1475 ),
1476 )
1477 running = self.listDaemons()
1478 if not running:
1479 break
1480
1481 if not running:
1482 return ""
1483
1484 logger.warning(
1485 "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
1486 )
1487 for name, pid in running:
1488 pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
1489 logger.info("%s: killing %s", self.name, name)
1490 self.cmd("kill -SIGBUS %d" % pid)
1491 self.cmd("rm -- " + pidfile)
1492
1493 sleep(
1494 0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
1495 )
1496
1497 errors = self.checkRouterCores(reportOnce=True)
1498 if self.checkRouterVersion("<", minErrorVersion):
1499 # ignore errors in old versions
1500 errors = ""
1501 if assertOnError and (errors is not None) and len(errors) > 0:
1502 assert "Errors found - details follow:" == 0, errors
1503 return errors
1504
1505 def removeIPs(self):
1506 for interface in self.intfNames():
1507 try:
1508 self.intf_ip_cmd(interface, "ip address flush " + interface)
1509 except Exception as ex:
1510 logger.error("%s can't remove IPs %s", self, str(ex))
1511 # pdb.set_trace()
1512 # assert False, "can't remove IPs %s" % str(ex)
1513
1514 def checkCapability(self, daemon, param):
1515 if param is not None:
1516 daemon_path = os.path.join(self.daemondir, daemon)
1517 daemon_search_option = param.replace("-", "")
1518 output = self.cmd(
1519 "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
1520 )
1521 if daemon_search_option not in output:
1522 return False
1523 return True
1524
1525 def loadConf(self, daemon, source=None, param=None):
1526 """Enabled and set config for a daemon.
1527
1528 Arranges for loading of daemon configuration from the specified source. Possible
1529 `source` values are `None` for an empty config file, a path name which is used
1530 directly, or a file name with no path components which is first looked for
1531         directly and then looked for under a sub-directory named after the router.
1532 """
1533
1534         # Unfortunately this API allows for source to not exist for any and all routers.
1535 if source:
1536 head, tail = os.path.split(source)
1537 if not head and not self.path_exists(tail):
1538 script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
1539 router_relative = os.path.join(script_dir, self.name, tail)
1540 if self.path_exists(router_relative):
1541 source = router_relative
1542 self.logger.info(
1543 "using router relative configuration: {}".format(source)
1544 )
1545
1546 # print "Daemons before:", self.daemons
1547 if daemon in self.daemons.keys() or daemon == "frr":
1548 if daemon == "frr":
1549 self.unified_config = 1
1550 else:
1551 self.daemons[daemon] = 1
1552 if param is not None:
1553 self.daemons_options[daemon] = param
1554 conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
1555 if source is None or not os.path.exists(source):
1556 if daemon == "frr" or not self.unified_config:
1557 self.cmd_raises("rm -f " + conf_file)
1558 self.cmd_raises("touch " + conf_file)
1559 else:
1560 self.cmd_raises("cp {} {}".format(source, conf_file))
1561
1562 if not self.unified_config or daemon == "frr":
1563 self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
1564 self.cmd_raises("chmod 664 {}".format(conf_file))
1565
1566 if (daemon == "snmpd") and (self.routertype == "frr"):
1567 # /etc/snmp is private mount now
1568 self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
1569 self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')
1570
1571 if (daemon == "zebra") and (self.daemons["staticd"] == 0):
1572 # Add staticd with zebra - if it exists
1573 try:
1574 staticd_path = os.path.join(self.daemondir, "staticd")
1575 except:
1576 pdb.set_trace()
1577
1578 if os.path.isfile(staticd_path):
1579 self.daemons["staticd"] = 1
1580 self.daemons_options["staticd"] = ""
1581 # Auto-Started staticd has no config, so it will read from zebra config
1582 else:
1583 logger.info("No daemon {} known".format(daemon))
1584 # print "Daemons after:", self.daemons
1585
1586 def runInWindow(self, cmd, title=None):
1587 return self.run_in_window(cmd, title)
1588
1589 def startRouter(self, tgen=None):
1590 if self.unified_config:
1591 self.cmd(
1592 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1593 % self.routertype
1594 )
1595 else:
1596 # Disable integrated-vtysh-config
1597 self.cmd(
1598 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1599 % self.routertype
1600 )
1601
1602 self.cmd(
1603 "chown %s:%svty /etc/%s/vtysh.conf"
1604 % (self.routertype, self.routertype, self.routertype)
1605 )
1606 # TODO remove the following lines after all tests are migrated to Topogen.
1607 # Try to find relevant old logfiles in /tmp and delete them
1608         list(map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))))
1609 # Remove old core files
1610         list(map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))))
1611 # Remove IP addresses from OS first - we have them in zebra.conf
1612 self.removeIPs()
1613 # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
1614 # No error - but return message and skip all the tests
1615 if self.daemons["ldpd"] == 1:
1616 ldpd_path = os.path.join(self.daemondir, "ldpd")
1617 if not os.path.isfile(ldpd_path):
1618 logger.info("LDP Test, but no ldpd compiled or installed")
1619 return "LDP Test, but no ldpd compiled or installed"
1620
1621 if version_cmp(platform.release(), "4.5") < 0:
1622 logger.info("LDP Test need Linux Kernel 4.5 minimum")
1623 return "LDP Test need Linux Kernel 4.5 minimum"
1624 # Check if have mpls
1625             if tgen is not None:
1626 self.hasmpls = tgen.hasmpls
1627                 if not self.hasmpls:
1628 logger.info(
1629 "LDP/MPLS Tests will be skipped, platform missing module(s)"
1630 )
1631 else:
1632 # Test for MPLS Kernel modules available
1633 self.hasmpls = False
1634 if not module_present("mpls-router"):
1635 logger.info(
1636 "MPLS tests will not run (missing mpls-router kernel module)"
1637 )
1638 elif not module_present("mpls-iptunnel"):
1639 logger.info(
1640 "MPLS tests will not run (missing mpls-iptunnel kernel module)"
1641 )
1642 else:
1643 self.hasmpls = True
1644             if not self.hasmpls:
1645 return "LDP/MPLS Tests need mpls kernel modules"
1646
1647 # Really want to use sysctl_atleast here, but only when MPLS is actually being
1648 # used
1649 self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
1650
1651 shell_routers = g_extra_config["shell"]
1652 if "all" in shell_routers or self.name in shell_routers:
1653 self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)
1654
1655 if self.daemons["eigrpd"] == 1:
1656 eigrpd_path = os.path.join(self.daemondir, "eigrpd")
1657 if not os.path.isfile(eigrpd_path):
1658 logger.info("EIGRP Test, but no eigrpd compiled or installed")
1659 return "EIGRP Test, but no eigrpd compiled or installed"
1660
1661 if self.daemons["bfdd"] == 1:
1662 bfdd_path = os.path.join(self.daemondir, "bfdd")
1663 if not os.path.isfile(bfdd_path):
1664 logger.info("BFD Test, but no bfdd compiled or installed")
1665 return "BFD Test, but no bfdd compiled or installed"
1666
1667 status = self.startRouterDaemons(tgen=tgen)
1668
1669 vtysh_routers = g_extra_config["vtysh"]
1670 if "all" in vtysh_routers or self.name in vtysh_routers:
1671 self.run_in_window("vtysh", title="vt-%s" % self.name)
1672
1673 if self.unified_config:
1674 self.cmd("vtysh -f /etc/frr/frr.conf")
1675
1676 return status
1677
1678 def getStdErr(self, daemon):
1679 return self.getLog("err", daemon)
1680
1681 def getStdOut(self, daemon):
1682 return self.getLog("out", daemon)
1683
1684 def getLog(self, log, daemon):
1685 return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
1686
1687 def startRouterDaemons(self, daemons=None, tgen=None):
1688 "Starts FRR daemons for this router."
1689
1690 asan_abort = g_extra_config["asan_abort"]
1691 gdb_breakpoints = g_extra_config["gdb_breakpoints"]
1692 gdb_daemons = g_extra_config["gdb_daemons"]
1693 gdb_routers = g_extra_config["gdb_routers"]
1694 valgrind_extra = g_extra_config["valgrind_extra"]
1695 valgrind_memleaks = g_extra_config["valgrind_memleaks"]
1696 strace_daemons = g_extra_config["strace_daemons"]
1697
1698 # Get global bundle data
1699 if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
1700 # Copy global value if was covered by namespace mount
1701 bundle_data = ""
1702 if os.path.exists("/etc/frr/support_bundle_commands.conf"):
1703 with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
1704 bundle_data = rf.read()
1705 self.cmd_raises(
1706 "cat > /etc/frr/support_bundle_commands.conf",
1707 stdin=bundle_data,
1708 )
1709
1710 # Starts actual daemons without init (ie restart)
1711 # cd to per node directory
1712 self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
1713 self.set_cwd("{}/{}".format(self.logdir, self.name))
1714 self.cmd("umask 000")
1715
1716 # Re-enable to allow for report per run
1717 self.reportCores = True
1718
1719 # XXX: glue code forward ported from removed function.
1720         if self.version is None:
1721 self.version = self.cmd(
1722 os.path.join(self.daemondir, "bgpd") + " -v"
1723 ).split()[2]
1724 logger.info("{}: running version: {}".format(self.name, self.version))
1725 # If `daemons` was specified then some upper API called us with
1726 # specific daemons, otherwise just use our own configuration.
1727 daemons_list = []
1728 if daemons is not None:
1729 daemons_list = daemons
1730 else:
1731 # Append all daemons configured.
1732 for daemon in self.daemons:
1733 if self.daemons[daemon] == 1:
1734 daemons_list.append(daemon)
1735
1736 def start_daemon(daemon, extra_opts=None):
1737 daemon_opts = self.daemons_options.get(daemon, "")
1738 rediropt = " > {0}.out 2> {0}.err".format(daemon)
1739 if daemon == "snmpd":
1740 binary = "/usr/sbin/snmpd"
1741 cmdenv = ""
1742 cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
1743 daemon_opts
1744 ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
1745 else:
1746 binary = os.path.join(self.daemondir, daemon)
1747
1748 cmdenv = "ASAN_OPTIONS="
1749 if asan_abort:
1750 cmdenv = "abort_on_error=1:"
1751 cmdenv += "log_path={0}/{1}.{2}.asan ".format(
1752 self.logdir, self.name, daemon
1753 )
1754
1755 if valgrind_memleaks:
1756 this_dir = os.path.dirname(
1757 os.path.abspath(os.path.realpath(__file__))
1758 )
1759 supp_file = os.path.abspath(
1760 os.path.join(this_dir, "../../../tools/valgrind.supp")
1761 )
1762 cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
1763 daemon, self.logdir, self.name, supp_file
1764 )
1765 if valgrind_extra:
1766 cmdenv += (
1767 " --gen-suppressions=all --expensive-definedness-checks=yes"
1768 )
1769 elif daemon in strace_daemons or "all" in strace_daemons:
1770 cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
1771 daemon, self.logdir, self.name
1772 )
1773
1774 cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
1775 daemon_opts, daemon
1776 )
1777 if extra_opts:
1778 cmdopt += " " + extra_opts
1779
1780 if (
1781 (gdb_routers or gdb_daemons)
1782 and (
1783 not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
1784 )
1785 and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
1786 ):
1787 if daemon == "snmpd":
1788 cmdopt += " -f "
1789
1790 cmdopt += rediropt
1791 gdbcmd = "sudo -E gdb " + binary
1792 if gdb_breakpoints:
1793 gdbcmd += " -ex 'set breakpoint pending on'"
1794 for bp in gdb_breakpoints:
1795 gdbcmd += " -ex 'b {}'".format(bp)
1796 gdbcmd += " -ex 'run {}'".format(cmdopt)
1797
1798 self.run_in_window(gdbcmd, daemon)
1799
1800 logger.info(
1801 "%s: %s %s launched in gdb window", self, self.routertype, daemon
1802 )
1803 else:
1804 if daemon != "snmpd":
1805 cmdopt += " -d "
1806 cmdopt += rediropt
1807
1808 try:
1809 self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
1810 except subprocess.CalledProcessError as error:
1811 self.logger.error(
1812 '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
1813 self,
1814 daemon,
1815 error.returncode,
1816 error.cmd,
1817 '\n:stdout: "{}"'.format(error.stdout.strip())
1818 if error.stdout
1819 else "",
1820 '\n:stderr: "{}"'.format(error.stderr.strip())
1821 if error.stderr
1822 else "",
1823 )
1824 else:
1825 logger.info("%s: %s %s started", self, self.routertype, daemon)
1826
1827 # Start Zebra first
1828 if "zebra" in daemons_list:
1829 start_daemon("zebra", "-s 90000000")
1830 while "zebra" in daemons_list:
1831 daemons_list.remove("zebra")
1832
1833 # Start staticd next if required
1834 if "staticd" in daemons_list:
1835 start_daemon("staticd")
1836 while "staticd" in daemons_list:
1837 daemons_list.remove("staticd")
1838
1839 if "snmpd" in daemons_list:
1840             # Give zebra a chance to configure interface addresses that the snmpd daemon
1841 # may then use.
1842 time.sleep(2)
1843
1844 start_daemon("snmpd")
1845 while "snmpd" in daemons_list:
1846 daemons_list.remove("snmpd")
1847
1848 if daemons is None:
1849 # Fix Link-Local Addresses on initial startup
1850 # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
1851 _, output, _ = self.cmd_status(
1852 "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
1853 stderr=subprocess.STDOUT,
1854 )
1855 logger.debug("Set MACs:\n%s", output)
1856
1857 # Now start all the other daemons
1858 for daemon in daemons_list:
1859 if self.daemons[daemon] == 0:
1860 continue
1861 start_daemon(daemon)
1862
1863 # Check if daemons are running.
1864 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1865 if re.search(r"No such file or directory", rundaemons):
1866 return "Daemons are not running"
1867
1868 # Update the permissions on the log files
1869 self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
1870 self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))
1871
1872 return ""
1873
1874 def killRouterDaemons(
1875 self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
1876 ):
1877 # Kill running FRR daemons (only the user-specified ones)
1878 # using SIGKILL
1879 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1880 errors = ""
1881 daemonsNotRunning = []
1882 if re.search(r"No such file or directory", rundaemons):
1883 return errors
1884 for daemon in daemons:
1885 if rundaemons is not None and daemon in rundaemons:
1886 numRunning = 0
1887 dmns = rundaemons.split("\n")
1888 # Exclude empty string at end of list
1889 for d in dmns[:-1]:
1890 if re.search(r"%s" % daemon, d):
1891 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1892 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
1893 logger.info(
1894 "{}: killing {}".format(
1895 self.name,
1896 os.path.basename(d.rstrip().rsplit(".", 1)[0]),
1897 )
1898 )
1899 self.cmd("kill -9 %s" % daemonpid)
1900 if pid_exists(int(daemonpid)):
1901 numRunning += 1
1902 while wait and numRunning > 0:
1903 sleep(
1904 2,
1905 "{}: waiting for {} daemon to be stopped".format(
1906 self.name, daemon
1907 ),
1908 )
1909
1910 # 2nd round of kill if daemons didn't exit
1911 for d in dmns[:-1]:
1912 if re.search(r"%s" % daemon, d):
1913 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1914 if daemonpid.isdigit() and pid_exists(
1915 int(daemonpid)
1916 ):
1917 logger.info(
1918 "{}: killing {}".format(
1919 self.name,
1920 os.path.basename(
1921 d.rstrip().rsplit(".", 1)[0]
1922 ),
1923 )
1924 )
1925 self.cmd("kill -9 %s" % daemonpid)
1926 if daemonpid.isdigit() and not pid_exists(
1927 int(daemonpid)
1928 ):
1929 numRunning -= 1
1930 self.cmd("rm -- {}".format(d.rstrip()))
1931 if wait:
1932 errors = self.checkRouterCores(reportOnce=True)
1933 if self.checkRouterVersion("<", minErrorVersion):
1934 # ignore errors in old versions
1935 errors = ""
1936 if assertOnError and len(errors) > 0:
1937 assert "Errors found - details follow:" == 0, errors
1938 else:
1939 daemonsNotRunning.append(daemon)
1940 if len(daemonsNotRunning) > 0:
1941 errors = errors + "Daemons are not running: %s" % ", ".join(daemonsNotRunning)
1942
1943 return errors
1944
1945 def checkRouterCores(self, reportLeaks=True, reportOnce=False):
1946 if reportOnce and not self.reportCores:
1947 return
1948 reportMade = False
1949 traces = ""
1950 for daemon in self.daemons:
1951 if self.daemons[daemon] == 1:
1952 # Look for core file
1953 corefiles = glob.glob(
1954 "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
1955 )
1956 if len(corefiles) > 0:
1957 backtrace = gdb_core(self, daemon, corefiles)
1958 traces = (
1959 traces
1960 + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
1961 % (self.name, daemon, backtrace)
1962 )
1963 reportMade = True
1964 elif reportLeaks:
1965 log = self.getStdErr(daemon)
1966 if "memstats" in log:
1967 sys.stderr.write(
1968 "%s: %s has memory leaks:\n" % (self.name, daemon)
1969 )
1970 traces = traces + "\n%s: %s has memory leaks:\n" % (
1971 self.name,
1972 daemon,
1973 )
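# Strip the log prefixes and give each memory group its own
# "##" heading so the leak report is readable.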
1974 log = re.sub("core_handler: ", "", log)
1975 log = re.sub(
1976 r"(showing active allocations in memory group [a-zA-Z0-9]+)",
1977 r"\n ## \1",
1978 log,
1979 )
1980 log = re.sub("memstats: ", " ", log)
1981 sys.stderr.write(log)
1982 reportMade = True
1983 # Look for AddressSanitizer errors and append them to /tmp/AddressSanitizer.txt if found
1984 if checkAddressSanitizerError(
1985 self.getStdErr(daemon), self.name, daemon, self.logdir
1986 ):
1987 sys.stderr.write(
1988 "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
1989 )
1990 traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
1991 self.name,
1992 daemon,
1993 )
1994 reportMade = True
1995 if reportMade:
1996 self.reportCores = False
1997 return traces
1998
1999 def checkRouterRunning(self):
2000 "Check if router daemons are running and collect crashinfo they don't run"
2001
2002 global fatal_error
2003
2004 daemonsRunning = self.cmd(
2005 'vtysh -c "show logging" | grep "Logging configuration for"'
2006 )
2007 # Look for AddressSanitizer errors in vtysh output and append them to /tmp/AddressSanitizer.txt if found
2008 if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
2009 return "%s: vtysh killed by AddressSanitizer" % (self.name)
2010
2011 for daemon in self.daemons:
2012 if daemon == "snmpd":
2013 continue
2014 if (self.daemons[daemon] == 1) and (daemon not in daemonsRunning):
2015 sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
2016 if daemon == "staticd":
2017 sys.stderr.write(
2018 "You may have a copy of staticd installed but are attempting to test against\n"
2019 )
2020 sys.stderr.write(
2021 "a version of FRR that does not have staticd, please cleanup the install dir\n"
2022 )
2023
2024 # Look for core file
2025 corefiles = glob.glob(
2026 "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
2027 )
2028 if len(corefiles) > 0:
2029 gdb_core(self, daemon, corefiles)
2030 else:
2031 # No core found - if there is a matching logfile in the log directory, print its last 20 lines.
2032 if os.path.isfile(
2033 "{}/{}/{}.log".format(self.logdir, self.name, daemon)
2034 ):
2035 log_tail = subprocess.check_output(
2036 [
2037 "tail -n20 {}/{}/{}.log 2> /dev/null".format(
2038 self.logdir, self.name, daemon
2039 )
2040 ],
2041 shell=True,
2042 )
2043 sys.stderr.write(
2044 "\nFrom %s %s %s log file:\n"
2045 % (self.routertype, self.name, daemon)
2046 )
2047 sys.stderr.write("%s\n" % log_tail)
2048
2049 # Look for AddressSanitizer errors and append them to /tmp/AddressSanitizer.txt if found
2050 if checkAddressSanitizerError(
2051 self.getStdErr(daemon), self.name, daemon, self.logdir
2052 ):
2053 return "%s: Daemon %s not running - killed by AddressSanitizer" % (
2054 self.name,
2055 daemon,
2056 )
2057
2058 return "%s: Daemon %s not running" % (self.name, daemon)
2059 return ""
2060
2061 def checkRouterVersion(self, cmpop, version):
2062 """
2063 Compares router version using operation `cmpop` with `version`.
2064 Valid `cmpop` values:
2065 * `>=`: has the same version or greater
2066 * `>`: has a greater version
2067 * `=`: has the same version
2068 * `<`: has a lesser version
2069 * `<=`: has the same version or lesser
2070
2071 Usage example: router.checkRouterVersion('>', '1.0')
2072 """
2073
2074 # Make sure we have version information first
2075 if self.version is None:
2076 self.version = self.cmd(
2077 os.path.join(self.daemondir, "bgpd") + " -v"
2078 ).split()[2]
2079 logger.info("{}: running version: {}".format(self.name, self.version))
2080
2081 rversion = self.version
2082 if rversion is None:
2083 return False
2084
2085 result = version_cmp(rversion, version)
2086 if cmpop == ">=":
2087 return result >= 0
2088 if cmpop == ">":
2089 return result > 0
2090 if cmpop == "=":
2091 return result == 0
2092 if cmpop == "<":
2093 return result < 0
2096 if cmpop == "<=":
2097 return result <= 0
2098
2099 def get_ipv6_linklocal(self):
2100 "Get LinkLocal Addresses from interfaces"
2101
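# Returns a list of [ifname, address] pairs, e.g.
# [["r1-eth0", "fe80::a8bb:ccff:fedd:eeff"], ...] (names illustrative);
# a second link-local on the same interface is reported as "<ifname>-2".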
2102 linklocal = []
2103
2104 ifaces = self.cmd("ip -6 address")
2105 # Fix newlines (make them all the same)
2106 ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
2107 interface = ""
2108 ll_per_if_count = 0
2109 for line in ifaces:
2110 m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
2111 if m:
2112 interface = m.group(1)
2113 ll_per_if_count = 0
2114 m = re.search(
2115 "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
2116 line,
2117 )
2118 if m:
2119 local = m.group(1)
2120 ll_per_if_count += 1
2121 if ll_per_if_count > 1:
2122 linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
2123 else:
2124 linklocal += [[interface, local]]
2125 return linklocal
2126
2127 def daemon_available(self, daemon):
2128 "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
2129
2130 daemon_path = os.path.join(self.daemondir, daemon)
2131 if not os.path.isfile(daemon_path):
2132 return False
2133 if daemon == "ldpd":
2134 if version_cmp(platform.release(), "4.5") < 0:
2135 return False
2136 if not module_present("mpls-router", load=False):
2137 return False
2138 if not module_present("mpls-iptunnel", load=False):
2139 return False
2140 return True
2141
2142 def get_routertype(self):
2143 "Return the type of Router (frr)"
2144
2145 return self.routertype
2146
2147 def report_memory_leaks(self, filename_prefix, testscript):
2148 "Report Memory Leaks to file prefixed with given string"
2149
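# e.g. report_memory_leaks("/tmp/memleak_", "test_bgp_foo.py")
# appends its report to /tmp/memleak_test_bgp_foo.txt
# (names purely illustrative)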
2150 leakfound = False
2151 filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
2152 for daemon in self.daemons:
2153 if self.daemons[daemon] == 1:
2154 log = self.getStdErr(daemon)
2155 if "memstats" in log:
2156 # Found memory leak
2157 logger.info(
2158 "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
2159 )
2160 if not leakfound:
2161 leakfound = True
2162 # Check if file already exists
2163 fileexists = os.path.isfile(filename)
2164 leakfile = open(filename, "a")
2165 if not fileexists:
2166 # New file - add header
2167 leakfile.write(
2168 "# Memory Leak Detection for topotest %s\n\n"
2169 % testscript
2170 )
2171 leakfile.write("## Router %s\n" % self.name)
2172 leakfile.write("### Process %s\n" % daemon)
2173 log = re.sub("core_handler: ", "", log)
2174 log = re.sub(
2175 r"(showing active allocations in memory group [a-zA-Z0-9]+)",
2176 r"\n#### \1\n",
2177 log,
2178 )
2179 log = re.sub("memstats: ", " ", log)
2180 leakfile.write(log)
2181 leakfile.write("\n")
2182 if leakfound:
2183 leakfile.close()
2184
2185
2186 def frr_unicode(s):
2187 """Convert string to unicode, depending on python version"""
2188 if sys.version_info[0] > 2:
2189 return s
2190 else:
2191 return unicode(s) # pylint: disable=E0602
2192
2193
2194 def is_mapping(o):
2195 return isinstance(o, Mapping)
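# e.g. is_mapping({"a": 1}) is True, is_mapping([1, 2]) is False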