# tests/topotests/lib/topotest.py — FRR topotests helper library
# (git web-view navigation residue removed from the top of this file)
1 #!/usr/bin/env python
2
3 #
4 # topotest.py
5 # Library of helper functions for NetDEF Topology Tests
6 #
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 import difflib
26 import errno
27 import functools
28 import glob
29 import json
30 import os
31 import pdb
32 import platform
33 import re
34 import resource
35 import signal
36 import subprocess
37 import sys
38 import tempfile
39 import time
40 from copy import deepcopy
41
42 import lib.topolog as topolog
43 from lib.topolog import logger
44
45 if sys.version_info[0] > 2:
46 import configparser
47 from collections.abc import Mapping
48 else:
49 import ConfigParser as configparser
50 from collections import Mapping
51
52 from lib import micronet
53 from lib.micronet_compat import Node
54
55 g_extra_config = {}
56
57
def get_logs_path(rundir):
    "Return the per-test log directory located below `rundir`."
    logdir = topolog.get_test_logdir()
    return os.path.join(rundir, logdir)
61
62
def gdb_core(obj, daemon, corefiles):
    """
    Run gdb in batch mode against the first core file of `daemon`,
    write the backtrace to stderr and return it.
    """
    # "info threads" + full backtrace, then walk up five frames,
    # disassembling at each level.
    commands = ["info threads", "bt full"] + ["disassemble", "up"] * 5 + ["disassemble"]
    gdb_args = []
    for command in commands:
        gdb_args += ["-ex", command]

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdb_args
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
91
92
class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        # One list entry per collected error line.
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return bool(self.errors)

    def gen_report(self):
        "Return the report as a list of lines, starting with a headline."
        return ["Generated JSON diff error report:", ""] + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )
116
117
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.

    `acc` is an accumulator tuple `(error_count, error_text)` threaded through
    the recursion; the updated accumulator is returned.  `path` describes the
    current position in the JSON tree and prefixes every error message.
    """

    def dump_json(v):
        # Containers are pretty-printed and tab-indented, scalars quoted.
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        # NOTE: bool must be tested before int/float because bool is a
        # subclass of int; otherwise True/False would be reported as "Number".
        if isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    # "*" in d2 matches anything; equal scalars match.
    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    # Ordered Array comparison: explicit "__ordered__" marker or exact mode.
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    # Unordered Array comparison: every d2 element must match some d1 element.
    elif isinstance(d1, list) and isinstance(d2, list):
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    # Compare copies so the probing does not mutate inputs.
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        # Consume the matched element so it cannot match twice.
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    # Exact Object comparison: key sets must be identical.
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    # Subset Object comparison: None in d2 asserts key absence in d1.
    elif isinstance(d1, dict) and isinstance(d2, dict):
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    # Remaining case: the two elements have incompatible JSON types.
    else:
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
282
283
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. There are special
    parameters and notations explained below which can be used to cover rather unusual
    cases:

    * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
      order within JSON Arrays)
    * using 'null' (or 'None' in Python) as JSON Object value is checking for key
      absence in d1
    * using '*' as JSON Object value or Array value is checking for presence in d1
      without checking the values
    * using '__ordered__' as first element in a JSON Array in d2 will also check the
      order when it is compared to an Array in d1
    """

    # Compare deep copies: the diff generator may mutate its arguments.
    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if not errors_n:
        return None
    result = json_cmp_result()
    result.add_error(errors)
    return result
314
315
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    current = normalize_text(router.vtysh_cmd(cmd))
    reference = normalize_text(expected)
    return difflines(
        current, reference, title1="Current output", title2="Expected output"
    )
326
327
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    parsed = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(parsed, data, exact)
334
335
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    started = time.time()

    # Resolve a printable name, unwrapping functools.partial objects.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Just a safety-check to avoid running topotests with very
    # small wait/count arguments.
    assert (
        wait * count >= 5
    ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
        count, wait
    )

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    result = None
    for _ in range(count):
        result = func()
        if result == what:
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - started
                )
            )
            return (True, result)
        time.sleep(wait)

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - started)
    )
    return (False, result)
394
395
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    started = time.time()

    # Resolve a printable name, unwrapping functools.partial objects.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Just a safety-check to avoid running topotests with very
    # small wait/count arguments.
    assert (
        wait * count >= 5
    ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
        count, wait
    )

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    result = None
    for _ in range(count):
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
            time.sleep(wait)
            continue
        # Optionally also check the value (skipped when expecting NoneType).
        if etype != type(None) and avalue != None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
            time.sleep(wait)
            continue
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, time.time() - started
            )
        )
        return (True, result)

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - started)
    )
    return (False, result)
460
461
def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents. Retry by default for 10 seconds
    """

    def test_func():
        # json_cmp returns None on a match, so poll until we see None.
        return router_json_cmp(router, cmd, data, exact)

    success, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
    return success
473
474
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        # Hex digits without the "0x" prefix, left-padded to 16 characters.
        hexstr = hex(dpid)[2:]
        return hexstr.zfill(16)
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
488
489
def pid_exists(pid):
    """
    Check whether pid exists in the current process table.

    Returns True when a process with that pid exists (even when owned by
    another user), False otherwise.
    """

    if pid <= 0:
        return False
    try:
        # Reap the process first in case it is one of our zombie children;
        # only OS-level failures (e.g. "not our child") are expected here,
        # so do not use a bare except which would also swallow
        # KeyboardInterrupt/SystemExit.
        os.waitpid(pid, os.WNOHANG)
    except OSError:
        pass
    try:
        # Signal 0 performs permission/existence checks without signaling.
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
514
515
def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    raw = difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    # Join the diff and drop empty lines to clean up line endings.
    return os.linesep.join(line for line in "\n".join(raw).splitlines() if line)
525
526
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def canonical(text):
        # Strip trailing blank lines, force one final newline and split
        # keeping line endings (as unified_diff expects).
        return ("\n".join(text.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(canonical(text1), canonical(text2), title1, title2, **opts)
532
533
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    # Lists/tuples of lines are joined into a single string first.
    if isinstance(content, (list, tuple)):
        content = "\n".join(content)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as handle:
        handle.write(content)
        return handle.name
545
546
def normalize_text(text):
    """
    Strips formating spaces/tabs, carriage returns and trailing whitespace.
    """
    # Collapse runs of spaces/tabs first, then drop carriage returns.
    text = re.sub(r"\r", "", re.sub(r"[ \t]+", " ", text))
    # Remove per-line trailing whitespace and trailing whitespace at the end.
    return re.sub(r"[ \t]+\n", "\n", text).rstrip()
560
561
def is_linux():
    """
    Parses unix name output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """
    return os.uname()[0] == "Linux"
572
573
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if not is_linux():
        return False
    try:
        proc = subprocess.Popen(
            ["ip", "route", "show", "vrf"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE,
        )
        # First word of the first stderr line decides the verdict.
        # NOTE(review): communicate() returns bytes here (no
        # universal_newlines), so on Python 3 this bytes token can never
        # equal the str "Error:" — confirm the intended semantics.
        first_word = proc.communicate()[1].splitlines()[0].split()[0]
        if first_word != "Error:":
            return True
    except Exception:
        # Any failure (no "ip" binary, empty stderr, ...) means "not capable".
        pass
    return False
597
def iproute2_is_fdb_get_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling `bridge fdb get` commands to query neigh table resolution.

    Returns True if capability can be detected, returns False otherwise.
    """

    if not is_linux():
        return False
    try:
        proc = subprocess.Popen(
            ["bridge", "fdb", "get", "help"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE,
        )
        # str() also renders a bytes token (e.g. "b'Usage'"), which still
        # contains the "Usage" marker we look for.
        first_word = proc.communicate()[1].splitlines()[0].split()[0]
        if "Usage" in str(first_word):
            return True
    except Exception:
        # Any failure (no "bridge" binary, empty stderr, ...) means "no".
        pass
    return False
621
def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    # The kernel lists loaded modules using underscores instead of dashes.
    with open("/proc/modules", "r") as modules_file:
        if module.replace("-", "_") in modules_file.read():
            return True
    # "-n" makes modprobe a dry-run; with `load` set, really insert it.
    flag = "" if load else "-n "
    cmd = "/sbin/modprobe {}{}".format(flag, module)
    return os.system(cmd) == 0
636
637
def module_present_freebsd(module, load):
    # FreeBSD ships the required features built in; always report present.
    return True
640
641
def module_present(module, load=True):
    "Dispatch the kernel-module presence check based on the host platform."
    platform_name = sys.platform
    if platform_name.startswith("linux"):
        return module_present_linux(module, load)
    if platform_name.startswith("freebsd"):
        return module_present_freebsd(module, load)
647
648
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    m1 = re.match(vregex, v1)
    m2 = re.match(vregex, v2)
    if m1 is None or m2 is None:
        raise ValueError("got a invalid version string")

    parts1 = [int(x) for x in m1.group("whole").split(".")]
    parts2 = [int(x) for x in m2.group("whole").split(".")]

    # Zero-pad the shorter version so "1.0" compares equal to "1".
    width = max(len(parts1), len(parts2))
    parts1 += [0] * (width - len(parts1))
    parts2 += [0] * (width - len(parts2))

    for a, b in zip(parts1, parts2):
        if a > b:
            return 1
        if a < b:
            return -1
    return 0
703
704
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    "Bring `ifacename` up (ifaceaction=True) or shut it down via vtysh on `node`."
    action = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, action
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, action
            )
        )
    node.run(cmd)
721
722
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ip route")
    else:
        raw = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
    # Mask the uptime column so outputs can be compared across runs.
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    lines = masked.splitlines()
    # Drop blank lines and everything up to the last legend-header line.
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]
    return "\n".join(lines)
741
742
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """

    if vrf_name is None:
        raw = node.vtysh_cmd("show ipv6 route")
    else:
        raw = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Mask out the link-local addresses
    masked = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", masked)

    lines = masked.splitlines()
    # Drop blank lines and everything up to the last legend-header line.
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]

    return "\n".join(lines)
768
769
def proto_name_to_number(protocol):
    "Map a routing protocol name to its zebra protocol number (as a string)."
    name_to_number = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
        "ospf6": "197",
    }
    # Unknown protocol names are returned unchanged.
    return name_to_number.get(protocol, protocol)
787
788
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip route")).splitlines():
        tokens = line.split(" ")
        entry = {}
        # First token is the destination prefix.
        result[tokens[0]] = entry
        # Each interesting keyword is immediately followed by its value.
        for keyword, value in zip(tokens, tokens[1:]):
            if keyword in ("dev", "via", "metric", "scope"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
828
829
def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip route show vrf {0}-cust1".format(node.name))

    result = {}
    for line in normalize_text(raw).splitlines():
        tokens = line.split(" ")
        entry = {}
        # First token is the destination prefix.
        result[tokens[0]] = entry
        # Each interesting keyword is immediately followed by its value.
        for keyword, value in zip(tokens, tokens[1:]):
            if keyword in ("dev", "via", "metric", "scope"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
872
873
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip -6 route")).splitlines():
        tokens = line.split(" ")
        entry = {}
        # First token is the destination prefix.
        result[tokens[0]] = entry
        # Each interesting keyword is immediately followed by its value.
        for keyword, value in zip(tokens, tokens[1:]):
            if keyword in ("dev", "via", "metric", "pref"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
912
913
def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip -6 route show vrf {0}-cust1".format(node.name))
    result = {}
    for line in normalize_text(raw).splitlines():
        tokens = line.split(" ")
        entry = {}
        # First token is the destination prefix.
        result[tokens[0]] = entry
        # Each interesting keyword is immediately followed by its value.
        for keyword, value in zip(tokens, tokens[1:]):
            if keyword in ("dev", "via", "metric", "pref"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
954
955
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    result = []
    for line in normalize_text(node.run("ip rule")).splitlines():
        tokens = line.split(" ")
        # First token is the preference, with a trailing ':' to strip.
        rule = {"pref": tokens[0][:-1]}
        # Each interesting keyword is immediately followed by its value.
        for keyword, value in zip(tokens, tokens[1:]):
            if keyword in ("from", "to", "proto", "iif", "fwmark"):
                rule[keyword] = value
        result.append(rule)
    return result
1005
1006
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        message = "Sleeping for {} seconds".format(amount)
    else:
        message = reason + " ({} seconds)".format(amount)
    logger.info(message)

    time.sleep(amount)
1017
1018
def checkAddressSanitizerError(output, router, component, logdir=""):
    """
    Checks for AddressSanitizer in output. If found, then logs it and returns
    true, false otherwise.

    `output` is scanned first; when `logdir` is given, the daemon's
    `<component>.asan.*` files below `logdir/router` are scanned as well.
    """

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        # Dump the sanitizer log to stderr and append it to a summary file.
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back
            # NOTE: dict_values is not subscriptable on Python 3, so the
            # view must be materialized into a list first.
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't found the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False
1116
1117
def _sysctl_atleast(commander, variable, min_value):
    "Raise sysctl `variable` so every component is at least `min_value`."
    if isinstance(min_value, tuple):
        min_value = list(min_value)
    multi = isinstance(min_value, list)

    current = commander.cmd_raises("sysctl -n " + variable).strip()
    if multi:
        current = [int(x) for x in current.split()]
    else:
        current = int(current)

    needs_update = False
    if multi:
        for idx, val in enumerate(current):
            if val < min_value[idx]:
                needs_update = True
            else:
                # Keep the already-larger current component.
                min_value[idx] = val
    elif current < min_value:
        needs_update = True

    if needs_update:
        if multi:
            value_str = " ".join(str(x) for x in min_value)
        else:
            value_str = str(min_value)
        logger.info("Increasing sysctl %s from %s to %s", variable, current, value_str)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, value_str))
1146
1147
def _sysctl_assure(commander, variable, value):
    "Set sysctl `variable` to exactly `value` when it differs."
    if isinstance(value, tuple):
        value = list(value)
    multi = isinstance(value, list)

    current = commander.cmd_raises("sysctl -n " + variable).strip()
    if multi:
        current = [int(x) for x in current.split()]

    needs_update = False
    if multi:
        for idx, val in enumerate(current):
            if val != value[idx]:
                needs_update = True
            else:
                value[idx] = val
    # Scalar case: compare the raw string value.
    elif current != str(value):
        needs_update = True

    if needs_update:
        if multi:
            value_str = " ".join(str(x) for x in value)
        else:
            value_str = str(value)
        logger.info("Changing sysctl %s from %s to %s", variable, current, value_str)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, value_str))
1177
1178
def sysctl_atleast(commander, variable, min_value, raises=False):
    """Ensure sysctl `variable` is at least `min_value`, raising it if needed.

    When `commander` is None a throw-away host Commander is used.  Failures
    are logged with the traceback; they are re-raised only when `raises` is
    True.  Returns whatever `_sysctl_atleast` returns on success.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # Include the traceback, matching sysctl_assure's error reporting.
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
1193
1194
def sysctl_assure(commander, variable, value, raises=False):
    """Ensure sysctl `variable` is exactly `value`, setting it if needed.

    When `commander` is None a throw-away host Commander is used.  Failures
    are logged with the traceback; they are re-raised only when `raises` is
    True.  Returns whatever `_sysctl_assure` returns on success.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1210
1211
def rlimit_atleast(rname, min_value, raises=False):
    """Ensure the soft limit of rlimit `rname` is at least `min_value`.

    The hard limit is raised along with the soft limit when it is below
    `min_value`.  Failures are logged; re-raised only when `raises` is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    # resource.getrlimit/setrlimit raise ValueError or OSError
    # (resource.error), never subprocess.CalledProcessError -- catching the
    # wrong type here previously made `raises=False` ineffective.
    except (ValueError, resource.error):
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1226
1227
def fix_netns_limits(ns):
    """Apply the sysctl settings every topotest namespace relies on."""

    # Maximum read and write socket buffer sizes
    for var in ("net.ipv4.tcp_rmem", "net.ipv4.tcp_wmem"):
        sysctl_atleast(ns, var, [10 * 1024, 87380, 16 * 2 ** 20])

    # Setting arp_ignore to 1 breaks topotests that rely on lo addresses
    # being proxy arp'd for, so it stays 0.
    fixed_settings = [
        ("net.ipv4.conf.all.rp_filter", 0),
        ("net.ipv4.conf.default.rp_filter", 0),
        ("net.ipv4.conf.lo.rp_filter", 0),
        ("net.ipv4.conf.all.forwarding", 1),
        ("net.ipv4.conf.default.forwarding", 1),
        # XXX if things fail look here as this wasn't done previously
        ("net.ipv6.conf.all.forwarding", 1),
        ("net.ipv6.conf.default.forwarding", 1),
        # ARP
        ("net.ipv4.conf.default.arp_announce", 2),
        ("net.ipv4.conf.default.arp_notify", 1),
        ("net.ipv4.conf.default.arp_ignore", 0),
        ("net.ipv4.conf.all.arp_announce", 2),
        ("net.ipv4.conf.all.arp_notify", 1),
        ("net.ipv4.conf.all.arp_ignore", 0),
        ("net.ipv4.icmp_errors_use_inbound_ifaddr", 1),
        # Keep ipv6 permanent addresses on an admin down
        ("net.ipv6.conf.all.keep_addr_on_down", 1),
    ]
    for var, val in fixed_settings:
        sysctl_assure(ns, var, val)

    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
1270
1271
def fix_host_limits():
    """Increase system limits."""

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)

    # (setter, variable, value) tuples, applied in order.
    tuning = [
        (sysctl_atleast, "fs.file-max", 16 * 1024),
        (sysctl_atleast, "kernel.pty.max", 16 * 1024),
        # Enable coredumps
        # Original on ubuntu 17.x, but apport won't save as in namespace
        # |/usr/share/apport/apport %p %s %c %d %P
        (sysctl_assure, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp"),
        (sysctl_assure, "kernel.core_uses_pid", 1),
        (sysctl_assure, "fs.suid_dumpable", 1),
        # Maximum connection backlog
        (sysctl_atleast, "net.core.netdev_max_backlog", 4 * 1024),
        # Maximum read and write socket buffer sizes
        (sysctl_atleast, "net.core.rmem_max", 16 * 2 ** 20),
        (sysctl_atleast, "net.core.wmem_max", 16 * 2 ** 20),
        # Garbage Collection Settings for ARP and Neighbors
        (sysctl_atleast, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024),
        (sysctl_atleast, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024),
        (sysctl_atleast, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024),
        (sysctl_atleast, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024),
        # Hold entries for 10 minutes
        (sysctl_assure, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000),
        (sysctl_assure, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000),
        # igmp
        (sysctl_assure, "net.ipv4.neigh.default.mcast_solicit", 10),
        # MLD
        (sysctl_atleast, "net.ipv6.mld_max_msf", 512),
        # Increase routing table size to 128K
        (sysctl_atleast, "net.ipv4.route.max_size", 128 * 1024),
        (sysctl_atleast, "net.ipv6.route.max_size", 128 * 1024),
    ]
    for setter, variable, value in tuning:
        setter(None, variable, value)
1312
1313
def setup_node_tmpdir(logdir, name):
    """Prepare the per-node log directory for router `name` under `logdir`.

    Removes stale log, valgrind and core artifacts from earlier runs, then
    (re)creates a world-writable per-node directory.  Returns the path of
    the node's main logfile.
    """
    # Cleanup old log, valgrind, and core files.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name), shell=True
    )

    # Setup the per node directory.
    per_node_dir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(per_node_dir), shell=True
    )
    return "{0}/{1}.log".format(logdir, name)
1327
1328
1329 class Router(Node):
1330 "A Node with IPv4/IPv6 forwarding enabled"
1331
    def __init__(self, name, **params):
        """Create a router node named `name`.

        Loads topogen-style configuration defaults from ../pytest.ini,
        chooses a log directory (from params or the global run dir), sets
        up a per-node logger unless topogen already supplied one, then
        initializes the daemon bookkeeping tables.
        """

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        self.daemondir = None  # directory holding the FRR daemon binaries
        self.hasmpls = False  # set by startRouter when MPLS modules are usable
        self.routertype = "frr"
        self.unified_config = None  # set to 1 by loadConf("frr", ...) for unified config
        # 0/1 flags: which daemons this router should run (set via loadConf).
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "pim6d": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}  # extra CLI options per daemon
        self.reportCores = True
        self.version = None

        # Command prefix a human can use to enter this node's namespaces.
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
1403
1404 def _config_frr(self, **params):
1405 "Configure FRR binaries"
1406 self.daemondir = params.get("frrdir")
1407 if self.daemondir is None:
1408 self.daemondir = self.config_defaults.get("topogen", "frrdir")
1409
1410 zebra_path = os.path.join(self.daemondir, "zebra")
1411 if not os.path.isfile(zebra_path):
1412 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
1413
1414 # pylint: disable=W0221
1415 # Some params are only meaningful for the parent class.
1416 def config(self, **params):
1417 super(Router, self).config(**params)
1418
1419 # User did not specify the daemons directory, try to autodetect it.
1420 self.daemondir = params.get("daemondir")
1421 if self.daemondir is None:
1422 self.routertype = params.get(
1423 "routertype", self.config_defaults.get("topogen", "routertype")
1424 )
1425 self._config_frr(**params)
1426 else:
1427 # Test the provided path
1428 zpath = os.path.join(self.daemondir, "zebra")
1429 if not os.path.isfile(zpath):
1430 raise Exception("No zebra binary found in {}".format(zpath))
1431 # Allow user to specify routertype when the path was specified.
1432 if params.get("routertype") is not None:
1433 self.routertype = params.get("routertype")
1434
1435 # Set ownership of config files
1436 self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))
1437
1438 def terminate(self):
1439 # Stop running FRR daemons
1440 self.stopRouter()
1441 super(Router, self).terminate()
1442 os.system("chmod -R go+rw " + self.logdir)
1443
1444 # Return count of running daemons
1445 def listDaemons(self):
1446 ret = []
1447 rc, stdout, _ = self.cmd_status(
1448 "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
1449 )
1450 if rc:
1451 return ret
1452 for d in stdout.strip().split("\n"):
1453 pidfile = d.strip()
1454 try:
1455 pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
1456 name = os.path.basename(pidfile[:-4])
1457
1458 # probably not compatible with bsd.
1459 rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
1460 if rc:
1461 logger.warning(
1462 "%s: %s exited leaving pidfile %s (%s)",
1463 self.name,
1464 name,
1465 pidfile,
1466 pid,
1467 )
1468 self.cmd("rm -- " + pidfile)
1469 else:
1470 ret.append((name, pid))
1471 except (subprocess.CalledProcessError, ValueError):
1472 pass
1473 return ret
1474
    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        Sends SIGTERM to each daemon, polls up to ~15s for them to exit,
        then SIGBUSes any survivors so they dump core for analysis.
        Returns collected error text ("" when clean); asserts on errors
        when `assertOnError`, unless the FRR version predates
        `minErrorVersion`.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            # Poll up to 30 * 0.5s for the SIGTERM'd daemons to exit.
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        # Anything still alive gets SIGBUS so it leaves a core file behind.
        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            assert "Errors found - details follow:" == 0, errors
        return errors
1527
1528 def removeIPs(self):
1529 for interface in self.intfNames():
1530 try:
1531 self.intf_ip_cmd(interface, "ip address flush " + interface)
1532 except Exception as ex:
1533 logger.error("%s can't remove IPs %s", self, str(ex))
1534 # pdb.set_trace()
1535 # assert False, "can't remove IPs %s" % str(ex)
1536
1537 def checkCapability(self, daemon, param):
1538 if param is not None:
1539 daemon_path = os.path.join(self.daemondir, daemon)
1540 daemon_search_option = param.replace("-", "")
1541 output = self.cmd(
1542 "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
1543 )
1544 if daemon_search_option not in output:
1545 return False
1546 return True
1547
    def loadConf(self, daemon, source=None, param=None):
        """Enabled and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                # Bare filename not found: retry relative to the test
                # script's per-router sub-directory.
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.info(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            if daemon == "frr":
                # "frr" selects the unified (integrated) config style.
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
                if param is not None:
                    self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                # Missing source: start from an empty config file (skipped
                # for per-daemon files when unified config is in effect).
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                # NOTE(review): os.path.join on strings cannot raise here, so
                # this bare except / pdb.set_trace() looks like leftover
                # debugging aid -- confirm before removing.
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons
1608
1609 def runInWindow(self, cmd, title=None):
1610 return self.run_in_window(cmd, title)
1611
1612 def startRouter(self, tgen=None):
1613 if self.unified_config:
1614 self.cmd(
1615 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1616 % self.routertype
1617 )
1618 else:
1619 # Disable integrated-vtysh-config
1620 self.cmd(
1621 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1622 % self.routertype
1623 )
1624
1625 self.cmd(
1626 "chown %s:%svty /etc/%s/vtysh.conf"
1627 % (self.routertype, self.routertype, self.routertype)
1628 )
1629 # TODO remove the following lines after all tests are migrated to Topogen.
1630 # Try to find relevant old logfiles in /tmp and delete them
1631 map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
1632 # Remove old core files
1633 map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
1634 # Remove IP addresses from OS first - we have them in zebra.conf
1635 self.removeIPs()
1636 # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
1637 # No error - but return message and skip all the tests
1638 if self.daemons["ldpd"] == 1:
1639 ldpd_path = os.path.join(self.daemondir, "ldpd")
1640 if not os.path.isfile(ldpd_path):
1641 logger.info("LDP Test, but no ldpd compiled or installed")
1642 return "LDP Test, but no ldpd compiled or installed"
1643
1644 if version_cmp(platform.release(), "4.5") < 0:
1645 logger.info("LDP Test need Linux Kernel 4.5 minimum")
1646 return "LDP Test need Linux Kernel 4.5 minimum"
1647 # Check if have mpls
1648 if tgen != None:
1649 self.hasmpls = tgen.hasmpls
1650 if self.hasmpls != True:
1651 logger.info(
1652 "LDP/MPLS Tests will be skipped, platform missing module(s)"
1653 )
1654 else:
1655 # Test for MPLS Kernel modules available
1656 self.hasmpls = False
1657 if not module_present("mpls-router"):
1658 logger.info(
1659 "MPLS tests will not run (missing mpls-router kernel module)"
1660 )
1661 elif not module_present("mpls-iptunnel"):
1662 logger.info(
1663 "MPLS tests will not run (missing mpls-iptunnel kernel module)"
1664 )
1665 else:
1666 self.hasmpls = True
1667 if self.hasmpls != True:
1668 return "LDP/MPLS Tests need mpls kernel modules"
1669
1670 # Really want to use sysctl_atleast here, but only when MPLS is actually being
1671 # used
1672 self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
1673
1674 shell_routers = g_extra_config["shell"]
1675 if "all" in shell_routers or self.name in shell_routers:
1676 self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)
1677
1678 if self.daemons["eigrpd"] == 1:
1679 eigrpd_path = os.path.join(self.daemondir, "eigrpd")
1680 if not os.path.isfile(eigrpd_path):
1681 logger.info("EIGRP Test, but no eigrpd compiled or installed")
1682 return "EIGRP Test, but no eigrpd compiled or installed"
1683
1684 if self.daemons["bfdd"] == 1:
1685 bfdd_path = os.path.join(self.daemondir, "bfdd")
1686 if not os.path.isfile(bfdd_path):
1687 logger.info("BFD Test, but no bfdd compiled or installed")
1688 return "BFD Test, but no bfdd compiled or installed"
1689
1690 status = self.startRouterDaemons(tgen=tgen)
1691
1692 vtysh_routers = g_extra_config["vtysh"]
1693 if "all" in vtysh_routers or self.name in vtysh_routers:
1694 self.run_in_window("vtysh", title="vt-%s" % self.name)
1695
1696 if self.unified_config:
1697 self.cmd("vtysh -f /etc/frr/frr.conf")
1698
1699 return status
1700
    def getStdErr(self, daemon):
        """Return the captured stderr of `daemon` for this node."""
        return self.getLog("err", daemon)
1703
    def getStdOut(self, daemon):
        """Return the captured stdout of `daemon` for this node."""
        return self.getLog("out", daemon)
1706
1707 def getLog(self, log, daemon):
1708 return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
1709
    def startRouterDaemons(self, daemons=None, tgen=None):
        """Starts FRR daemons for this router.

        When `daemons` is given, only those are started; otherwise every
        daemon enabled in self.daemons is launched (zebra first, then
        staticd, then snmpd, then the rest).  Honors the global gdb /
        valgrind / strace / asan options.  Returns "" on success or an
        error message string.
        """

        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        def start_daemon(daemon, extra_opts=None):
            # Build the environment prefix, binary path and options for one
            # daemon, then launch it (optionally inside a gdb window).
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
            else:
                binary = os.path.join(self.daemondir, daemon)

                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    cmdenv = "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
                    daemon_opts, daemon
                )
            if extra_opts:
                cmdopt += " " + extra_opts

            # Run under gdb when the daemon/router matches the gdb globals.
            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            else:
                if daemon != "snmpd":
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start Zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zebra a chance to configure interface addresses that snmpd daemon
            # may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        if daemons is None:
            # Fix Link-Local Addresses on initial startup
            # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
            _, output, _ = self.cmd_status(
                "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
                stderr=subprocess.STDOUT,
            )
            logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        return ""
1896
1897 def killRouterDaemons(
1898 self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
1899 ):
1900 # Kill Running FRR
1901 # Daemons(user specified daemon only) using SIGKILL
1902 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1903 errors = ""
1904 daemonsNotRunning = []
1905 if re.search(r"No such file or directory", rundaemons):
1906 return errors
1907 for daemon in daemons:
1908 if rundaemons is not None and daemon in rundaemons:
1909 numRunning = 0
1910 dmns = rundaemons.split("\n")
1911 # Exclude empty string at end of list
1912 for d in dmns[:-1]:
1913 if re.search(r"%s" % daemon, d):
1914 daemonpidfile = d.rstrip()
1915 daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip()
1916 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
1917 logger.info(
1918 "{}: killing {}".format(
1919 self.name,
1920 os.path.basename(daemonpidfile.rsplit(".", 1)[0]),
1921 )
1922 )
1923 os.kill(int(daemonpid), signal.SIGKILL)
1924 if pid_exists(int(daemonpid)):
1925 numRunning += 1
1926 while wait and numRunning > 0:
1927 sleep(
1928 2,
1929 "{}: waiting for {} daemon to be stopped".format(
1930 self.name, daemon
1931 ),
1932 )
1933
1934 # 2nd round of kill if daemons didn't exit
1935 for d in dmns[:-1]:
1936 if re.search(r"%s" % daemon, d):
1937 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1938 if daemonpid.isdigit() and pid_exists(
1939 int(daemonpid)
1940 ):
1941 logger.info(
1942 "{}: killing {}".format(
1943 self.name,
1944 os.path.basename(
1945 d.rstrip().rsplit(".", 1)[0]
1946 ),
1947 )
1948 )
1949 os.kill(int(daemonpid), signal.SIGKILL)
1950 if daemonpid.isdigit() and not pid_exists(
1951 int(daemonpid)
1952 ):
1953 numRunning -= 1
1954 self.cmd("rm -- {}".format(daemonpidfile))
1955 if wait:
1956 errors = self.checkRouterCores(reportOnce=True)
1957 if self.checkRouterVersion("<", minErrorVersion):
1958 # ignore errors in old versions
1959 errors = ""
1960 if assertOnError and len(errors) > 0:
1961 assert "Errors found - details follow:" == 0, errors
1962 else:
1963 daemonsNotRunning.append(daemon)
1964 if len(daemonsNotRunning) > 0:
1965 errors = errors + "Daemons are not running", daemonsNotRunning
1966
1967 return errors
1968
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Scan enabled daemons for core files, memory leaks and ASan errors.

        Returns accumulated backtrace/leak text ("" when nothing found).
        With `reportOnce`, a second call after a report was made returns
        early (None) so the same crash is not reported twice.
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        # Reformat the memstats dump for readability.
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n ## \1",
                            log,
                        )
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            self.reportCores = False
        return traces
2022
    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo they don't run"

        # NOTE(review): declared but never assigned in this method --
        # presumably legacy; confirm before removing.
        global fatal_error

        # Each running daemon answers "show logging" through vtysh; a daemon
        # missing from this output is considered not running.
        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if daemon == "snmpd":
                continue
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""
2084
def checkRouterVersion(self, cmpop, version):
    """
    Compares router version using operation `cmpop` with `version`.
    Valid `cmpop` values:
    * `>=`: has the same version or greater
    * '>': has greater version
    * '=': has the same version
    * '<': has a lesser version
    * '<=': has the same version or lesser

    Returns a bool for a recognized `cmpop`, None otherwise.

    Usage example: router.checkRouterVersion('>', '1.0')
    """

    # Make sure we have version information first; lazily query bgpd for
    # its version string (third whitespace-separated token of `bgpd -v`).
    if self.version is None:
        self.version = self.cmd(
            os.path.join(self.daemondir, "bgpd") + " -v"
        ).split()[2]
        logger.info("{}: running version: {}".format(self.name, self.version))

    rversion = self.version
    if rversion is None:
        # Could not determine the running version - fail the comparison.
        return False

    result = version_cmp(rversion, version)
    if cmpop == ">=":
        return result >= 0
    if cmpop == ">":
        return result > 0
    if cmpop == "=":
        return result == 0
    # NOTE(review): the original had this "<" branch duplicated verbatim;
    # the redundant copy was removed without changing behavior.
    if cmpop == "<":
        return result < 0
    if cmpop == "<=":
        return result <= 0
def get_ipv6_linklocal(self):
    "Get LinkLocal Addresses from interfaces"

    found = []

    output = self.cmd("ip -6 address")
    # Normalize line endings so every platform parses identically.
    lines = ("\n".join(output.splitlines()) + "\n").splitlines()
    current_if = ""
    count_on_if = 0
    for entry in lines:
        # An interface header line, e.g. "2: eth0@if7: <BROADCAST,..."
        if_match = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", entry)
        if if_match:
            current_if = if_match.group(1)
            count_on_if = 0
        # A link-local address line under the current interface.
        ll_match = re.search(
            "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
            entry,
        )
        if ll_match:
            count_on_if += 1
            if count_on_if > 1:
                # Additional addresses get a "-<n>" suffixed interface name.
                found.append(
                    ["%s-%s" % (current_if, count_on_if), ll_match.group(1)]
                )
            else:
                found.append([current_if, ll_match.group(1)])
    return found
2150
def daemon_available(self, daemon):
    "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

    # The daemon binary must exist in the install directory.
    binary = os.path.join(self.daemondir, daemon)
    if not os.path.isfile(binary):
        return False
    if daemon != "ldpd":
        return True
    # ldpd additionally requires a kernel (>= 4.5) with MPLS modules.
    if version_cmp(platform.release(), "4.5") < 0:
        return False
    if not module_present("mpls-router", load=False):
        return False
    if not module_present("mpls-iptunnel", load=False):
        return False
    return True
2165
def get_routertype(self):
    """Return the router's type string (frr)."""
    return self.routertype
2170
def report_memory_leaks(self, filename_prefix, testscript):
    """Report Memory Leaks to file prefixed with given string.

    Scans each enabled daemon's stderr log for "memstats" output and, if
    any is found, appends a markdown-formatted section per leaking daemon
    to ``<filename_prefix><testscript-without-.py>.txt``.  No file is
    created or touched when no leak is detected.
    """

    filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"

    # Collect one formatted section per leaking daemon first, so the
    # report file is opened (and closed) exactly once, via a context
    # manager - the original kept a bare handle open across iterations
    # with no exception safety.
    sections = []
    for daemon in self.daemons:
        if self.daemons[daemon] != 1:
            continue
        log = self.getStdErr(daemon)
        if "memstats" not in log:
            continue
        # Found memory leak
        logger.info(
            "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
        )
        # Strip/reshape the raw stderr into readable markdown.
        log = re.sub("core_handler: ", "", log)
        log = re.sub(
            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
            r"\n#### \1\n",
            log,
        )
        log = re.sub("memstats: ", " ", log)
        sections.append(
            "## Router %s\n" % self.name
            + "### Process %s\n" % daemon
            + log
            + "\n"
        )

    if not sections:
        return

    fileexists = os.path.isfile(filename)
    with open(filename, "a") as leakfile:
        if not fileexists:
            # New file - add header
            leakfile.write(
                "# Memory Leak Detection for topotest %s\n\n" % testscript
            )
        leakfile.writelines(sections)
2208
2209
def frr_unicode(s):
    """Convert string to unicode, depending on python version"""
    if sys.version_info[0] <= 2:
        # Python 2: promote to a unicode object.
        return unicode(s)  # pylint: disable=E0602
    # Python 3: str is already unicode.
    return s
2216
2217
def is_mapping(o):
    """Return True when *o* is a mapping (dict-like) object."""
    return isinstance(o, Mapping)