]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/lib/topotest.py
Merge pull request #11061 from pguibert6WIND/debug_isis_bfd_too
[mirror_frr.git] / tests / topotests / lib / topotest.py
1 #!/usr/bin/env python
2
3 #
4 # topotest.py
5 # Library of helper functions for NetDEF Topology Tests
6 #
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 import difflib
26 import errno
27 import functools
28 import glob
29 import json
30 import os
31 import pdb
32 import platform
33 import re
34 import resource
35 import signal
36 import subprocess
37 import sys
38 import tempfile
39 import time
40 from copy import deepcopy
41
42 import lib.topolog as topolog
43 from lib.topolog import logger
44
45 if sys.version_info[0] > 2:
46 import configparser
47 from collections.abc import Mapping
48 else:
49 import ConfigParser as configparser
50 from collections import Mapping
51
52 from lib import micronet
53 from lib.micronet_compat import Node
54
# Extra run-time configuration shared across topotest helpers, keyed by option
# name.  Presumably populated by the pytest harness/conftest at startup and
# read by library code -- TODO confirm against conftest.py.
g_extra_config = {}
56
57
def get_logs_path(rundir):
    """Return the per-test log directory joined beneath `rundir`."""
    logdir = topolog.get_test_logdir()
    return os.path.join(rundir, logdir)
61
62
def gdb_core(obj, daemon, corefiles):
    """
    Run gdb in batch mode over the first core file of a crashed `daemon`,
    write the backtrace to stderr, and return it.
    """
    # Same command sequence as before: threads, full backtrace, then walk
    # five frames up disassembling each one.
    commands = ["info threads", "bt full", "disassemble"]
    for _ in range(5):
        commands.extend(["up", "disassemble"])

    gdb_args = []
    for command in commands:
        gdb_args.extend(["-ex", command])

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdb_args
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
91
92
class json_cmp_result(object):
    """Accumulates json_cmp() mismatch messages for readable assertion output."""

    def __init__(self):
        # One human-readable message per detected difference.
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return bool(self.errors)

    def gen_report(self):
        # Report as a list of lines, with a headline and a separator.
        return ["Generated JSON diff error report:", ""] + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )
116
117
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.

    Returns the accumulator `acc` as a tuple (error_count, error_text).
    `path` tracks the position inside the JSON tree for report messages.
    NOTE: may mutate d1/d2 (list matching deletes elements); json_cmp()
    deep-copies its inputs before calling here.
    """

    def dump_json(v):
        # Pretty-print containers, quote scalars.
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        # FIX: bool must be tested before int/float -- bool is a subclass of
        # int, so the old ordering made the Boolean branch unreachable and
        # misreported Booleans as "Number".
        if isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    # "*" is a wildcard: any d1 value matches; equal scalars match too.
    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        # Ordered array comparison: element i must match element i.
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        # Unordered subset match: every d2 element must match some remaining
        # d1 element; matched d1 elements are consumed.
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    # Compare deep copies so failed candidate matches do not
                    # mutate the real structures.
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        # Exact object comparison: key sets must be identical.
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        # Subset object comparison: a None value in d2 asserts key absence.
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        # Container/scalar type mismatch counts as two error points.
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
282
283
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. Special notations:

    * 'exact=True' tests d1 and d2 for equality (including order within JSON Arrays)
    * 'null' (Python 'None') as a JSON Object value checks for key absence in d1
    * '*' as a JSON Object or Array value checks presence in d1 without checking value
    * '__ordered__' as first element of a d2 JSON Array also checks element order
    """

    # Deep-copy both sides: the report generator mutates its arguments.
    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if errors_n == 0:
        return None
    result = json_cmp_result()
    result.add_error(errors)
    return result
314
315
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    current = normalize_text(router.vtysh_cmd(cmd))
    return difflines(
        current,
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )
326
327
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    parsed = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(parsed, data, exact)
334
335
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    started = time.time()

    # functools.partial hides the wrapped callable's name.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    while count > 0:
        result = func()
        if result == what:
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - started
                )
            )
            return (True, result)
        time.sleep(wait)
        count -= 1

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - started)
    )
    return (False, result)
384
385
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    started = time.time()

    # functools.partial hides the wrapped callable's name.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    while count > 0:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
        elif etype != type(None) and avalue != None and result != avalue:
            # Value check is skipped when the expected type is NoneType or
            # no expected value was supplied.
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
        else:
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - started
                )
            )
            return (True, result)
        time.sleep(wait)
        count -= 1

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - started)
    )
    return (False, result)
440
441
def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents. Retry by default for 10 seconds
    """

    # Keep the name 'test_func' -- it shows up in run_and_expect's log lines.
    def test_func():
        return router_json_cmp(router, cmd, data, exact)

    # Poll once per second until the comparison yields no error report.
    ok, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
    return ok
453
454
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        # hex() yields '0x...'; strip the prefix and left-pad to 16 digits.
        hexstr = hex(dpid)[2:]
        return hexstr.zfill(16)
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
468
469
def pid_exists(pid):
    """Check whether pid exists in the current process table.

    Returns True when a process with `pid` exists (including ones we may not
    signal, per EPERM), False otherwise.
    """

    if pid <= 0:
        return False
    # Reap the process if it is a zombie child of ours so the kill(0) probe
    # below reports it as gone.  FIX: the previous bare `except:` swallowed
    # every exception (even KeyboardInterrupt); only OS errors such as
    # "not a child" (ECHILD) are expected here.
    try:
        os.waitpid(pid, os.WNOHANG)
    except OSError:
        pass
    try:
        # Signal 0 performs error checking only, no signal is delivered.
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
494
495
def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    delta = difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    # Join, then drop empty lines so the resulting diff is compact.
    return os.linesep.join(line for line in "\n".join(delta).splitlines() if line)
505
506
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def _as_lines(text):
        # Normalize trailing whitespace, ensure a final newline, keep ends.
        return ("\n".join(text.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(_as_lines(text1), _as_lines(text2), title1, title2, **opts)
512
513
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    `content` may be a string or a list/tuple of lines (joined with newlines).
    """
    if isinstance(content, (list, tuple)):
        content = "\n".join(content)
    # delete=False: the caller owns (and must clean up) the file.
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as handle:
        handle.write(content)
        return handle.name
525
526
def normalize_text(text):
    """
    Strips formating spaces/tabs, carriage returns and trailing whitespace.
    """
    # Squeeze runs of spaces/tabs down to a single space.
    text = re.sub(r"[ \t]+", " ", text)
    # Drop carriage returns entirely.
    text = text.replace("\r", "")
    # Remove whitespace left just before line breaks.
    text = re.sub(r"[ \t]+\n", "\n", text)
    # Remove whitespace at the end of the text.
    return text.rstrip()
540
541
def is_linux():
    """
    Parses unix name output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """
    return os.uname()[0] == "Linux"
552
553
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
                # FIX: decode the output to str.  Without this, on Python 3
                # communicate() returns bytes and the comparison against the
                # str "Error:" below was always unequal, so the function
                # returned True whenever stderr was non-empty.
                universal_newlines=True,
            )
            iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]

            if iproute2_err != "Error:":
                return True
        except Exception:
            # Missing 'ip' binary or empty stderr: treat as not capable.
            pass
    return False
577
578
def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    # /proc/modules uses underscores where the module name may use dashes.
    wanted = module.replace("-", "_")
    with open("/proc/modules", "r") as modules_file:
        if wanted in modules_file.read():
            return True
    # Not loaded: either load it for real or dry-run (-n) to test presence.
    flags = "" if load else "-n "
    return os.system("/sbin/modprobe {}{}".format(flags, module)) == 0
593
594
def module_present_freebsd(module, load):
    """FreeBSD stub: report every kernel module as present."""
    return True
597
598
def module_present(module, load=True):
    """Dispatch the kernel-module presence check to the current platform."""
    plat = sys.platform
    if plat.startswith("linux"):
        return module_present_linux(module, load)
    if plat.startswith("freebsd"):
        return module_present_freebsd(module, load)
604
605
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    m1 = re.match(vregex, v1)
    m2 = re.match(vregex, v2)
    if m1 is None or m2 is None:
        raise ValueError("got a invalid version string")

    # Split into numeric components.
    parts1 = [int(x) for x in m1.group("whole").split(".")]
    parts2 = [int(x) for x in m2.group("whole").split(".")]

    # Pad the shorter list with zeros so "1.0" compares equal to "1",
    # then rely on Python's lexicographic list comparison.
    width = max(len(parts1), len(parts2))
    parts1 += [0] * (width - len(parts1))
    parts2 += [0] * (width - len(parts2))

    if parts1 > parts2:
        return 1
    if parts1 < parts2:
        return -1
    return 0
660
661
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    """Shut (ifaceaction=False) or un-shut (True) `ifacename` on `node` via vtysh."""
    action = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        iface_clause = "interface {0}".format(ifacename)
    else:
        iface_clause = "interface {0} vrf {1}".format(ifacename, vrf_name)
    node.run(
        'vtysh -c "configure terminal" -c "{0}" -c "{1}"'.format(iface_clause, action)
    )
678
679
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ip route")
    else:
        raw = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))

    # Mask out uptime timestamps so outputs are comparable.
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Drop the legend header (everything up to and including the
    # "offload failure" line) plus any leading blank lines.
    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]
    return "\n".join(lines)
698
699
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ipv6 route")
    else:
        raw = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Mask out the link-local addresses
    output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)

    # Drop the legend header (up to and including "offload failure") plus
    # any leading blank lines.
    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]

    return "\n".join(lines)
725
726
def proto_name_to_number(protocol):
    """Map an FRR protocol name to its rtproto number (as string); unknown
    names pass through unchanged."""
    proto_numbers = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
        "ospf6": "197",
    }
    # default return same as input
    return proto_numbers.get(protocol, protocol)
744
745
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    routes = {}
    for line in normalize_text(node.run("ip route")).splitlines():
        words = line.split(" ")
        # First column is the destination; a later duplicate overwrites.
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            # iproute2 prints "<keyword> <value>" pairs.
            if previous == "dev":
                entry["dev"] = word
            elif previous == "via":
                entry["via"] = word
            elif previous == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(word)
            elif previous == "metric":
                entry["metric"] = word
            elif previous == "scope":
                entry["scope"] = word
            previous = word

    return routes
785
786
def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip route show vrf {0}-cust1".format(node.name))

    routes = {}
    for line in normalize_text(raw).splitlines():
        words = line.split(" ")
        # First column is the destination; a later duplicate overwrites.
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            # iproute2 prints "<keyword> <value>" pairs.
            if previous == "dev":
                entry["dev"] = word
            elif previous == "via":
                entry["via"] = word
            elif previous == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(word)
            elif previous == "metric":
                entry["metric"] = word
            elif previous == "scope":
                entry["scope"] = word
            previous = word

    return routes
829
830
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    routes = {}
    for line in normalize_text(node.run("ip -6 route")).splitlines():
        words = line.split(" ")
        # First column is the destination; a later duplicate overwrites.
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            # iproute2 prints "<keyword> <value>" pairs.
            if previous == "dev":
                entry["dev"] = word
            elif previous == "via":
                entry["via"] = word
            elif previous == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(word)
            elif previous == "metric":
                entry["metric"] = word
            elif previous == "pref":
                entry["pref"] = word
            previous = word

    return routes
869
870
def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip -6 route show vrf {0}-cust1".format(node.name))

    routes = {}
    for line in normalize_text(raw).splitlines():
        words = line.split(" ")
        # First column is the destination; a later duplicate overwrites.
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            # iproute2 prints "<keyword> <value>" pairs.
            if previous == "dev":
                entry["dev"] = word
            elif previous == "via":
                entry["via"] = word
            elif previous == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(word)
            elif previous == "metric":
                entry["metric"] = word
            elif previous == "pref":
                entry["pref"] = word
            previous = word

    return routes
911
912
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    rules = []
    for line in normalize_text(node.run("ip rule")).splitlines():
        words = line.split(" ")

        # The leading column is "<pref>:" -- strip the trailing colon.
        rule = {"pref": words[0][:-1]}
        previous = None
        for word in words:
            # Keywords are followed by their value in iproute2 output.
            if previous in ("from", "to", "proto", "iif", "fwmark"):
                rule[previous] = word
            previous = word

        rules.append(rule)
    return rules
962
963
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        message = "Sleeping for {} seconds".format(amount)
    else:
        message = reason + " ({} seconds)".format(amount)
    logger.info(message)
    time.sleep(amount)
974
975
def checkAddressSanitizerError(output, router, component, logdir=""):
    """Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise

    Also scans `<logdir>/<router>/<component>.asan.*` files when `logdir` is
    given.  Matches are echoed to stderr and appended to
    /tmp/AddressSanitzer.txt.
    """

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        # The full report is bracketed by two occurrences of the "==<pid>==" mark.
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back
            # FIX: dict views are not subscriptable on Python 3 --
            # .values()[0] raised TypeError; materialize to a list first.
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't found the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False
1073
1074
def _sysctl_atleast(commander, variable, min_value):
    """Raise sysctl `variable` on `commander` so every component is at least
    `min_value` (scalar or list, e.g. tcp_rmem triples)."""
    if isinstance(min_value, tuple):
        min_value = list(min_value)
    multi = isinstance(min_value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    if multi:
        cur_val = [int(x) for x in sval.split()]
        needs_update = False
        for i, component in enumerate(cur_val):
            if component < min_value[i]:
                needs_update = True
            else:
                # Keep the (already large enough) current component.
                min_value[i] = component
    else:
        cur_val = int(sval)
        needs_update = cur_val < min_value

    if needs_update:
        if multi:
            valstr = " ".join(str(x) for x in min_value)
        else:
            valstr = str(min_value)
        logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1103
1104
def _sysctl_assure(commander, variable, value):
    """Set sysctl `variable` on `commander` to exactly `value` when it
    differs (scalar compared as string, lists component-wise as ints)."""
    if isinstance(value, tuple):
        value = list(value)
    multi = isinstance(value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    if multi:
        cur_val = [int(x) for x in sval.split()]
        needs_update = False
        for i, component in enumerate(cur_val):
            if component != value[i]:
                needs_update = True
    else:
        # Scalar values are compared textually, no int conversion.
        cur_val = sval
        needs_update = cur_val != str(value)

    if needs_update:
        if multi:
            valstr = " ".join(str(x) for x in value)
        else:
            valstr = str(value)
        logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1134
1135
def sysctl_atleast(commander, variable, min_value, raises=False):
    """Best-effort wrapper for _sysctl_atleast.

    Failures are logged (and re-raised only when `raises` is True).
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # FIX: include the traceback, consistent with sysctl_assure();
        # also drop the unused `as error` binding.
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
1150
1151
def sysctl_assure(commander, variable, value, raises=False):
    """Best-effort wrapper for _sysctl_assure.

    Failures are logged with traceback (and re-raised only when `raises`).
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1167
1168
def rlimit_atleast(rname, min_value, raises=False):
    """Raise the soft limit of rlimit `rname` to at least `min_value`.

    The hard limit is raised too when it is below `min_value`.  Failures
    are logged with traceback and re-raised only when `raises` is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except (OSError, ValueError):
        # getrlimit/setrlimit raise OSError (e.g. EPERM) or ValueError --
        # never subprocess.CalledProcessError as was previously caught,
        # which made this handler (and the `raises` flag) dead code.
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1183
1184
def fix_netns_limits(ns):
    """Set the kernel networking sysctls inside namespace `ns` that the
    topotests rely on: socket buffers, IPv4/IPv6 forwarding, ARP
    behavior, IGMP membership limits and link-down route handling."""

    # Maximum read and write socket buffer sizes
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])

    # Reverse-path filtering would drop the asymmetric traffic some tests use.
    sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)

    sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)

    # ARP
    sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    # This knob only exists on Linux >= 4.20.
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
1227
1228
def fix_host_limits():
    """Increase system limits."""

    # Process/file-descriptor headroom for many concurrent daemons.
    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    sysctl_atleast(None, "fs.file-max", 16 * 1024)
    sysctl_atleast(None, "kernel.pty.max", 16 * 1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
    sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)

    # Garbage Collection Settings for ARP and Neighbors
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
    # Hold entries for 10 minutes
    sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    # igmp
    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
    sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
1269
1270
def setup_node_tmpdir(logdir, name):
    """Create (and clean) the per-node log directory under `logdir`.

    Removes stale valgrind/ASAN/log leftovers from previous runs, then
    creates a world-writable per-node directory.  Returns the path of
    the node's main log file (`<logdir>/<name>.log`).
    """
    # Cleanup old log, valgrind, and core files.
    # NOTE: the ASAN glob previously lacked the `{0}/` logdir prefix and
    # so never matched the `{logdir}/{name}.{daemon}.asan` files that
    # startRouterDaemons() creates.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {0}/{1}.*.asan {0}/{1}/".format(logdir, name),
        shell=True,
    )

    # Setup the per node directory (sticky + world-writable so daemons
    # running as different users can all write their logs there).
    nodelogdir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
    )
    logfile = "{0}/{1}.log".format(logdir, name)
    return logfile
1284
1285
class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled"

    def __init__(self, name, **params):
        # Set up config defaults, per-node logging and daemon bookkeeping
        # before handing off to the micronet Node base class.

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        # Project-level pytest.ini may override the defaults above.
        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.unified_config = None
        # Daemon enable flags: 0 = disabled, 1 = enabled (see loadConf()).
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        # Shell command prefix that enters this node's namespaces.
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            # Only log when /proc/1/cgroup was actually readable.
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
1359
1360 def _config_frr(self, **params):
1361 "Configure FRR binaries"
1362 self.daemondir = params.get("frrdir")
1363 if self.daemondir is None:
1364 self.daemondir = self.config_defaults.get("topogen", "frrdir")
1365
1366 zebra_path = os.path.join(self.daemondir, "zebra")
1367 if not os.path.isfile(zebra_path):
1368 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
1369
1370 # pylint: disable=W0221
1371 # Some params are only meaningful for the parent class.
1372 def config(self, **params):
1373 super(Router, self).config(**params)
1374
1375 # User did not specify the daemons directory, try to autodetect it.
1376 self.daemondir = params.get("daemondir")
1377 if self.daemondir is None:
1378 self.routertype = params.get(
1379 "routertype", self.config_defaults.get("topogen", "routertype")
1380 )
1381 self._config_frr(**params)
1382 else:
1383 # Test the provided path
1384 zpath = os.path.join(self.daemondir, "zebra")
1385 if not os.path.isfile(zpath):
1386 raise Exception("No zebra binary found in {}".format(zpath))
1387 # Allow user to specify routertype when the path was specified.
1388 if params.get("routertype") is not None:
1389 self.routertype = params.get("routertype")
1390
1391 # Set ownership of config files
1392 self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))
1393
1394 def terminate(self):
1395 # Stop running FRR daemons
1396 self.stopRouter()
1397 super(Router, self).terminate()
1398 os.system("chmod -R go+rw " + self.logdir)
1399
1400 # Return count of running daemons
1401 def listDaemons(self):
1402 ret = []
1403 rc, stdout, _ = self.cmd_status(
1404 "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
1405 )
1406 if rc:
1407 return ret
1408 for d in stdout.strip().split("\n"):
1409 pidfile = d.strip()
1410 try:
1411 pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
1412 name = os.path.basename(pidfile[:-4])
1413
1414 # probably not compatible with bsd.
1415 rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
1416 if rc:
1417 logger.warning(
1418 "%s: %s exited leaving pidfile %s (%s)",
1419 self.name,
1420 name,
1421 pidfile,
1422 pid,
1423 )
1424 self.cmd("rm -- " + pidfile)
1425 else:
1426 ret.append((name, pid))
1427 except (subprocess.CalledProcessError, ValueError):
1428 pass
1429 return ret
1430
    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        Sends SIGTERM to each daemon, waits up to ~15s for them to exit,
        then SIGBUS's any stragglers.  Returns the collected error string
        ("" when clean); asserts on errors when `assertOnError` is True
        and the FRR version is at least `minErrorVersion`.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            # Poll every .5s, up to 30 times, for the daemons to go away.
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        # Daemons ignored SIGTERM; SIGBUS them so a core gets dumped.
        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            # Always-false assert: fails the test with `errors` attached.
            assert "Errors found - details follow:" == 0, errors
        return errors
1483
1484 def removeIPs(self):
1485 for interface in self.intfNames():
1486 try:
1487 self.intf_ip_cmd(interface, "ip address flush " + interface)
1488 except Exception as ex:
1489 logger.error("%s can't remove IPs %s", self, str(ex))
1490 # pdb.set_trace()
1491 # assert False, "can't remove IPs %s" % str(ex)
1492
1493 def checkCapability(self, daemon, param):
1494 if param is not None:
1495 daemon_path = os.path.join(self.daemondir, daemon)
1496 daemon_search_option = param.replace("-", "")
1497 output = self.cmd(
1498 "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
1499 )
1500 if daemon_search_option not in output:
1501 return False
1502 return True
1503
    def loadConf(self, daemon, source=None, param=None):
        """Enable and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.info(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            if daemon == "frr":
                # "frr" selects a single unified frr.conf for all daemons.
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                # No usable source: start from an empty config file (unless a
                # unified config is in effect and this is a per-daemon file).
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    # NOTE(review): os.path.join should not raise here; this
                    # bare except + debugger entry looks like a leftover
                    # debugging aid -- confirm before removing.
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons
1564
    def runInWindow(self, cmd, title=None):
        """Compatibility wrapper around Node.run_in_window()."""
        return self.run_in_window(cmd, title)
1567
1568 def startRouter(self, tgen=None):
1569 if self.unified_config:
1570 self.cmd(
1571 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1572 % self.routertype
1573 )
1574 else:
1575 # Disable integrated-vtysh-config
1576 self.cmd(
1577 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1578 % self.routertype
1579 )
1580
1581 self.cmd(
1582 "chown %s:%svty /etc/%s/vtysh.conf"
1583 % (self.routertype, self.routertype, self.routertype)
1584 )
1585 # TODO remove the following lines after all tests are migrated to Topogen.
1586 # Try to find relevant old logfiles in /tmp and delete them
1587 map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
1588 # Remove old core files
1589 map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
1590 # Remove IP addresses from OS first - we have them in zebra.conf
1591 self.removeIPs()
1592 # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
1593 # No error - but return message and skip all the tests
1594 if self.daemons["ldpd"] == 1:
1595 ldpd_path = os.path.join(self.daemondir, "ldpd")
1596 if not os.path.isfile(ldpd_path):
1597 logger.info("LDP Test, but no ldpd compiled or installed")
1598 return "LDP Test, but no ldpd compiled or installed"
1599
1600 if version_cmp(platform.release(), "4.5") < 0:
1601 logger.info("LDP Test need Linux Kernel 4.5 minimum")
1602 return "LDP Test need Linux Kernel 4.5 minimum"
1603 # Check if have mpls
1604 if tgen != None:
1605 self.hasmpls = tgen.hasmpls
1606 if self.hasmpls != True:
1607 logger.info(
1608 "LDP/MPLS Tests will be skipped, platform missing module(s)"
1609 )
1610 else:
1611 # Test for MPLS Kernel modules available
1612 self.hasmpls = False
1613 if not module_present("mpls-router"):
1614 logger.info(
1615 "MPLS tests will not run (missing mpls-router kernel module)"
1616 )
1617 elif not module_present("mpls-iptunnel"):
1618 logger.info(
1619 "MPLS tests will not run (missing mpls-iptunnel kernel module)"
1620 )
1621 else:
1622 self.hasmpls = True
1623 if self.hasmpls != True:
1624 return "LDP/MPLS Tests need mpls kernel modules"
1625
1626 # Really want to use sysctl_atleast here, but only when MPLS is actually being
1627 # used
1628 self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
1629
1630 shell_routers = g_extra_config["shell"]
1631 if "all" in shell_routers or self.name in shell_routers:
1632 self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)
1633
1634 if self.daemons["eigrpd"] == 1:
1635 eigrpd_path = os.path.join(self.daemondir, "eigrpd")
1636 if not os.path.isfile(eigrpd_path):
1637 logger.info("EIGRP Test, but no eigrpd compiled or installed")
1638 return "EIGRP Test, but no eigrpd compiled or installed"
1639
1640 if self.daemons["bfdd"] == 1:
1641 bfdd_path = os.path.join(self.daemondir, "bfdd")
1642 if not os.path.isfile(bfdd_path):
1643 logger.info("BFD Test, but no bfdd compiled or installed")
1644 return "BFD Test, but no bfdd compiled or installed"
1645
1646 status = self.startRouterDaemons(tgen=tgen)
1647
1648 vtysh_routers = g_extra_config["vtysh"]
1649 if "all" in vtysh_routers or self.name in vtysh_routers:
1650 self.run_in_window("vtysh", title="vt-%s" % self.name)
1651
1652 if self.unified_config:
1653 self.cmd("vtysh -f /etc/frr/frr.conf")
1654
1655 return status
1656
1657 def getStdErr(self, daemon):
1658 return self.getLog("err", daemon)
1659
1660 def getStdOut(self, daemon):
1661 return self.getLog("out", daemon)
1662
1663 def getLog(self, log, daemon):
1664 return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
1665
1666 def startRouterDaemons(self, daemons=None, tgen=None):
1667 "Starts FRR daemons for this router."
1668
1669 asan_abort = g_extra_config["asan_abort"]
1670 gdb_breakpoints = g_extra_config["gdb_breakpoints"]
1671 gdb_daemons = g_extra_config["gdb_daemons"]
1672 gdb_routers = g_extra_config["gdb_routers"]
1673 valgrind_extra = g_extra_config["valgrind_extra"]
1674 valgrind_memleaks = g_extra_config["valgrind_memleaks"]
1675 strace_daemons = g_extra_config["strace_daemons"]
1676
1677 # Get global bundle data
1678 if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
1679 # Copy global value if was covered by namespace mount
1680 bundle_data = ""
1681 if os.path.exists("/etc/frr/support_bundle_commands.conf"):
1682 with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
1683 bundle_data = rf.read()
1684 self.cmd_raises(
1685 "cat > /etc/frr/support_bundle_commands.conf",
1686 stdin=bundle_data,
1687 )
1688
1689 # Starts actual daemons without init (ie restart)
1690 # cd to per node directory
1691 self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
1692 self.set_cwd("{}/{}".format(self.logdir, self.name))
1693 self.cmd("umask 000")
1694
1695 # Re-enable to allow for report per run
1696 self.reportCores = True
1697
1698 # XXX: glue code forward ported from removed function.
1699 if self.version == None:
1700 self.version = self.cmd(
1701 os.path.join(self.daemondir, "bgpd") + " -v"
1702 ).split()[2]
1703 logger.info("{}: running version: {}".format(self.name, self.version))
1704 # If `daemons` was specified then some upper API called us with
1705 # specific daemons, otherwise just use our own configuration.
1706 daemons_list = []
1707 if daemons is not None:
1708 daemons_list = daemons
1709 else:
1710 # Append all daemons configured.
1711 for daemon in self.daemons:
1712 if self.daemons[daemon] == 1:
1713 daemons_list.append(daemon)
1714
1715 def start_daemon(daemon, extra_opts=None):
1716 daemon_opts = self.daemons_options.get(daemon, "")
1717 rediropt = " > {0}.out 2> {0}.err".format(daemon)
1718 if daemon == "snmpd":
1719 binary = "/usr/sbin/snmpd"
1720 cmdenv = ""
1721 cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
1722 daemon_opts
1723 ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
1724 else:
1725 binary = os.path.join(self.daemondir, daemon)
1726
1727 cmdenv = "ASAN_OPTIONS="
1728 if asan_abort:
1729 cmdenv = "abort_on_error=1:"
1730 cmdenv += "log_path={0}/{1}.{2}.asan ".format(
1731 self.logdir, self.name, daemon
1732 )
1733
1734 if valgrind_memleaks:
1735 this_dir = os.path.dirname(
1736 os.path.abspath(os.path.realpath(__file__))
1737 )
1738 supp_file = os.path.abspath(
1739 os.path.join(this_dir, "../../../tools/valgrind.supp")
1740 )
1741 cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
1742 daemon, self.logdir, self.name, supp_file
1743 )
1744 if valgrind_extra:
1745 cmdenv += (
1746 " --gen-suppressions=all --expensive-definedness-checks=yes"
1747 )
1748 elif daemon in strace_daemons or "all" in strace_daemons:
1749 cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
1750 daemon, self.logdir, self.name
1751 )
1752
1753 cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
1754 daemon_opts, daemon
1755 )
1756 if extra_opts:
1757 cmdopt += " " + extra_opts
1758
1759 if (
1760 (gdb_routers or gdb_daemons)
1761 and (
1762 not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
1763 )
1764 and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
1765 ):
1766 if daemon == "snmpd":
1767 cmdopt += " -f "
1768
1769 cmdopt += rediropt
1770 gdbcmd = "sudo -E gdb " + binary
1771 if gdb_breakpoints:
1772 gdbcmd += " -ex 'set breakpoint pending on'"
1773 for bp in gdb_breakpoints:
1774 gdbcmd += " -ex 'b {}'".format(bp)
1775 gdbcmd += " -ex 'run {}'".format(cmdopt)
1776
1777 self.run_in_window(gdbcmd, daemon)
1778
1779 logger.info(
1780 "%s: %s %s launched in gdb window", self, self.routertype, daemon
1781 )
1782 else:
1783 if daemon != "snmpd":
1784 cmdopt += " -d "
1785 cmdopt += rediropt
1786
1787 try:
1788 self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
1789 except subprocess.CalledProcessError as error:
1790 self.logger.error(
1791 '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
1792 self,
1793 daemon,
1794 error.returncode,
1795 error.cmd,
1796 '\n:stdout: "{}"'.format(error.stdout.strip())
1797 if error.stdout
1798 else "",
1799 '\n:stderr: "{}"'.format(error.stderr.strip())
1800 if error.stderr
1801 else "",
1802 )
1803 else:
1804 logger.info("%s: %s %s started", self, self.routertype, daemon)
1805
1806 # Start Zebra first
1807 if "zebra" in daemons_list:
1808 start_daemon("zebra", "-s 90000000")
1809 while "zebra" in daemons_list:
1810 daemons_list.remove("zebra")
1811
1812 # Start staticd next if required
1813 if "staticd" in daemons_list:
1814 start_daemon("staticd")
1815 while "staticd" in daemons_list:
1816 daemons_list.remove("staticd")
1817
1818 if "snmpd" in daemons_list:
1819 # Give zerbra a chance to configure interface addresses that snmpd daemon
1820 # may then use.
1821 time.sleep(2)
1822
1823 start_daemon("snmpd")
1824 while "snmpd" in daemons_list:
1825 daemons_list.remove("snmpd")
1826
1827 if daemons is None:
1828 # Fix Link-Local Addresses on initial startup
1829 # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
1830 _, output, _ = self.cmd_status(
1831 "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
1832 stderr=subprocess.STDOUT,
1833 )
1834 logger.debug("Set MACs:\n%s", output)
1835
1836 # Now start all the other daemons
1837 for daemon in daemons_list:
1838 if self.daemons[daemon] == 0:
1839 continue
1840 start_daemon(daemon)
1841
1842 # Check if daemons are running.
1843 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1844 if re.search(r"No such file or directory", rundaemons):
1845 return "Daemons are not running"
1846
1847 # Update the permissions on the log files
1848 self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
1849 self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))
1850
1851 return ""
1852
1853 def killRouterDaemons(
1854 self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
1855 ):
1856 # Kill Running FRR
1857 # Daemons(user specified daemon only) using SIGKILL
1858 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1859 errors = ""
1860 daemonsNotRunning = []
1861 if re.search(r"No such file or directory", rundaemons):
1862 return errors
1863 for daemon in daemons:
1864 if rundaemons is not None and daemon in rundaemons:
1865 numRunning = 0
1866 dmns = rundaemons.split("\n")
1867 # Exclude empty string at end of list
1868 for d in dmns[:-1]:
1869 if re.search(r"%s" % daemon, d):
1870 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1871 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
1872 logger.info(
1873 "{}: killing {}".format(
1874 self.name,
1875 os.path.basename(d.rstrip().rsplit(".", 1)[0]),
1876 )
1877 )
1878 self.cmd("kill -9 %s" % daemonpid)
1879 if pid_exists(int(daemonpid)):
1880 numRunning += 1
1881 while wait and numRunning > 0:
1882 sleep(
1883 2,
1884 "{}: waiting for {} daemon to be stopped".format(
1885 self.name, daemon
1886 ),
1887 )
1888
1889 # 2nd round of kill if daemons didn't exit
1890 for d in dmns[:-1]:
1891 if re.search(r"%s" % daemon, d):
1892 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1893 if daemonpid.isdigit() and pid_exists(
1894 int(daemonpid)
1895 ):
1896 logger.info(
1897 "{}: killing {}".format(
1898 self.name,
1899 os.path.basename(
1900 d.rstrip().rsplit(".", 1)[0]
1901 ),
1902 )
1903 )
1904 self.cmd("kill -9 %s" % daemonpid)
1905 if daemonpid.isdigit() and not pid_exists(
1906 int(daemonpid)
1907 ):
1908 numRunning -= 1
1909 self.cmd("rm -- {}".format(d.rstrip()))
1910 if wait:
1911 errors = self.checkRouterCores(reportOnce=True)
1912 if self.checkRouterVersion("<", minErrorVersion):
1913 # ignore errors in old versions
1914 errors = ""
1915 if assertOnError and len(errors) > 0:
1916 assert "Errors found - details follow:" == 0, errors
1917 else:
1918 daemonsNotRunning.append(daemon)
1919 if len(daemonsNotRunning) > 0:
1920 errors = errors + "Daemons are not running", daemonsNotRunning
1921
1922 return errors
1923
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Scan every enabled daemon for core dumps, memstats leak reports
        and AddressSanitizer errors; return a description string of all
        findings (also written to stderr).  With `reportOnce`, only the
        first call after startRouterDaemons() reports."""
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        # Reformat the memstats dump for readability.
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n ## \1",
                            log,
                        )
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            # Suppress duplicate reports on later reportOnce calls.
            self.reportCores = False
        return traces
1977
1978 def checkRouterRunning(self):
1979 "Check if router daemons are running and collect crashinfo they don't run"
1980
1981 global fatal_error
1982
1983 daemonsRunning = self.cmd(
1984 'vtysh -c "show logging" | grep "Logging configuration for"'
1985 )
1986 # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
1987 if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
1988 return "%s: vtysh killed by AddressSanitizer" % (self.name)
1989
1990 for daemon in self.daemons:
1991 if daemon == "snmpd":
1992 continue
1993 if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
1994 sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
1995 if daemon == "staticd":
1996 sys.stderr.write(
1997 "You may have a copy of staticd installed but are attempting to test against\n"
1998 )
1999 sys.stderr.write(
2000 "a version of FRR that does not have staticd, please cleanup the install dir\n"
2001 )
2002
2003 # Look for core file
2004 corefiles = glob.glob(
2005 "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
2006 )
2007 if len(corefiles) > 0:
2008 gdb_core(self, daemon, corefiles)
2009 else:
2010 # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
2011 if os.path.isfile(
2012 "{}/{}/{}.log".format(self.logdir, self.name, daemon)
2013 ):
2014 log_tail = subprocess.check_output(
2015 [
2016 "tail -n20 {}/{}/{}.log 2> /dev/null".format(
2017 self.logdir, self.name, daemon
2018 )
2019 ],
2020 shell=True,
2021 )
2022 sys.stderr.write(
2023 "\nFrom %s %s %s log file:\n"
2024 % (self.routertype, self.name, daemon)
2025 )
2026 sys.stderr.write("%s\n" % log_tail)
2027
2028 # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
2029 if checkAddressSanitizerError(
2030 self.getStdErr(daemon), self.name, daemon, self.logdir
2031 ):
2032 return "%s: Daemon %s not running - killed by AddressSanitizer" % (
2033 self.name,
2034 daemon,
2035 )
2036
2037 return "%s: Daemon %s not running" % (self.name, daemon)
2038 return ""
2039
2040 def checkRouterVersion(self, cmpop, version):
2041 """
2042 Compares router version using operation `cmpop` with `version`.
2043 Valid `cmpop` values:
2044 * `>=`: has the same version or greater
2045 * '>': has greater version
2046 * '=': has the same version
2047 * '<': has a lesser version
2048 * '<=': has the same version or lesser
2049
2050 Usage example: router.checkRouterVersion('>', '1.0')
2051 """
2052
2053 # Make sure we have version information first
2054 if self.version == None:
2055 self.version = self.cmd(
2056 os.path.join(self.daemondir, "bgpd") + " -v"
2057 ).split()[2]
2058 logger.info("{}: running version: {}".format(self.name, self.version))
2059
2060 rversion = self.version
2061 if rversion == None:
2062 return False
2063
2064 result = version_cmp(rversion, version)
2065 if cmpop == ">=":
2066 return result >= 0
2067 if cmpop == ">":
2068 return result > 0
2069 if cmpop == "=":
2070 return result == 0
2071 if cmpop == "<":
2072 return result < 0
2073 if cmpop == "<":
2074 return result < 0
2075 if cmpop == "<=":
2076 return result <= 0
2077
2078 def get_ipv6_linklocal(self):
2079 "Get LinkLocal Addresses from interfaces"
2080
2081 linklocal = []
2082
2083 ifaces = self.cmd("ip -6 address")
2084 # Fix newlines (make them all the same)
2085 ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
2086 interface = ""
2087 ll_per_if_count = 0
2088 for line in ifaces:
2089 m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
2090 if m:
2091 interface = m.group(1)
2092 ll_per_if_count = 0
2093 m = re.search(
2094 "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
2095 line,
2096 )
2097 if m:
2098 local = m.group(1)
2099 ll_per_if_count += 1
2100 if ll_per_if_count > 1:
2101 linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
2102 else:
2103 linklocal += [[interface, local]]
2104 return linklocal
2105
2106 def daemon_available(self, daemon):
2107 "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
2108
2109 daemon_path = os.path.join(self.daemondir, daemon)
2110 if not os.path.isfile(daemon_path):
2111 return False
2112 if daemon == "ldpd":
2113 if version_cmp(platform.release(), "4.5") < 0:
2114 return False
2115 if not module_present("mpls-router", load=False):
2116 return False
2117 if not module_present("mpls-iptunnel", load=False):
2118 return False
2119 return True
2120
2121 def get_routertype(self):
2122 "Return the type of Router (frr)"
2123
2124 return self.routertype
2125
2126 def report_memory_leaks(self, filename_prefix, testscript):
2127 "Report Memory Leaks to file prefixed with given string"
2128
2129 leakfound = False
2130 filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
2131 for daemon in self.daemons:
2132 if self.daemons[daemon] == 1:
2133 log = self.getStdErr(daemon)
2134 if "memstats" in log:
2135 # Found memory leak
2136 logger.info(
2137 "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
2138 )
2139 if not leakfound:
2140 leakfound = True
2141 # Check if file already exists
2142 fileexists = os.path.isfile(filename)
2143 leakfile = open(filename, "a")
2144 if not fileexists:
2145 # New file - add header
2146 leakfile.write(
2147 "# Memory Leak Detection for topotest %s\n\n"
2148 % testscript
2149 )
2150 leakfile.write("## Router %s\n" % self.name)
2151 leakfile.write("### Process %s\n" % daemon)
2152 log = re.sub("core_handler: ", "", log)
2153 log = re.sub(
2154 r"(showing active allocations in memory group [a-zA-Z0-9]+)",
2155 r"\n#### \1\n",
2156 log,
2157 )
2158 log = re.sub("memstats: ", " ", log)
2159 leakfile.write(log)
2160 leakfile.write("\n")
2161 if leakfound:
2162 leakfile.close()
2163
2164
def frr_unicode(s):
    """Return *s* as a unicode string, bridging Python 2 and Python 3."""
    if sys.version_info[0] <= 2:
        # Python 2 needs an explicit conversion to the unicode type.
        return unicode(s)  # pylint: disable=E0602
    # On Python 3 every str is already unicode; hand it back untouched.
    return s
2171
2172
def is_mapping(o):
    """Return True when *o* is a mapping (dict-like) object."""
    return isinstance(o, Mapping)