]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/lib/topotest.py
Merge pull request #11485 from AbhishekNR/ipv6_mld_todo
[mirror_frr.git] / tests / topotests / lib / topotest.py
1 #!/usr/bin/env python
2
3 #
4 # topotest.py
5 # Library of helper functions for NetDEF Topology Tests
6 #
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 import difflib
26 import errno
27 import functools
28 import glob
29 import json
30 import os
31 import pdb
32 import platform
33 import re
34 import resource
35 import signal
36 import subprocess
37 import sys
38 import tempfile
39 import time
40 from copy import deepcopy
41
42 import lib.topolog as topolog
43 from lib.topolog import logger
44
45 if sys.version_info[0] > 2:
46 import configparser
47 from collections.abc import Mapping
48 else:
49 import ConfigParser as configparser
50 from collections import Mapping
51
52 from lib import micronet
53 from lib.micronet_compat import Node
54
55 g_extra_config = {}
56
57
def get_logs_path(rundir):
    """Return the absolute path of the current test's log directory under `rundir`."""
    return os.path.join(rundir, topolog.get_test_logdir())
61
62
def gdb_core(obj, daemon, corefiles):
    """
    Dump a backtrace for a crashed daemon using gdb.

    Runs gdb in batch mode against `daemon`'s binary and the first core file,
    writes a crash notice plus the backtrace to stderr, and returns the raw
    gdb output.
    """
    # info/bt first, then walk up the stack disassembling each frame.
    commands = ["info threads", "bt full", "disassemble"] + ["up", "disassemble"] * 5

    gdb_args = []
    for command in commands:
        gdb_args.extend(["-ex", command])

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdb_args
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
91
92
class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        # Accumulated error lines, one string per line.
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return bool(self.errors)

    def gen_report(self):
        # Report as a list of lines, headline first.
        return ["Generated JSON diff error report:", ""] + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )
116
117
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.

    Returns the accumulator `acc`, a tuple of (error count, error text).
    `path` tracks the current location inside the JSON tree for messages.
    """

    def dump_json(v):
        # Pretty-print containers, quote scalars.
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        if isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        # BUG FIX: bool must be tested before (int, float) because bool is a
        # subclass of int -- the previous order reported True/False as "Number".
        elif isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    # '*' is a wildcard; equal scalars match immediately.
    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        # Ordered array comparison: lengths must agree, elements pairwise.
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        # Unordered subset: every d2 element must match some d1 element.
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        # Remember the least-bad candidate for the report.
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        # Exact object comparison: key sets must be identical.
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        # Subset object comparison: a None value in d2 asserts key *absence*.
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        # Type mismatch weighs double so closest-match ranking prefers
        # same-type candidates.
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
282
283
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. There are special
    parameters and notations explained below which can be used to cover rather unusual
    cases:

    * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
      order within JSON Arrays)
    * using 'null' (or 'None' in Python) as JSON Object value is checking for key
      absence in d1
    * using '*' as JSON Object value or Array value is checking for presence in d1
      without checking the values
    * using '__ordered__' as first element in a JSON Array in d2 will also check the
      order when it is compared to an Array in d1
    """

    # Deep-copy both sides: the diff generator mutates its inputs.
    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if errors_n == 0:
        return None
    result = json_cmp_result()
    result.add_error(errors)
    return result
314
315
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    current = normalize_text(router.vtysh_cmd(cmd))
    return difflines(
        current,
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )
326
327
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    current = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(current, data, exact)
334
335
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    started = time.time()
    # functools.partial hides the wrapped function's name one level down.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    tries_left = count
    while tries_left > 0:
        result = func()
        if result == what:
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - started
                )
            )
            return (True, result)
        tries_left -= 1
        time.sleep(wait)

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - started)
    )
    return (False, result)
384
385
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    started = time.time()
    # functools.partial hides the wrapped function's name one level down.
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    tries_left = count
    while tries_left > 0:
        result = func()
        matched = isinstance(result, etype)
        if not matched:
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
        elif etype != type(None) and avalue != None and result != avalue:
            # Right type but wrong value (value check skipped for NoneType).
            matched = False
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))

        if matched:
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - started
                )
            )
            return (True, result)

        tries_left -= 1
        time.sleep(wait)

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - started)
    )
    return (False, result)
440
441
def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents. Retry by default for 10 seconds
    """

    def test_func():
        # Returns None on match -- which is what run_and_expect waits for.
        return router_json_cmp(router, cmd, data, exact)

    ok, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
    return ok
453
454
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        # Hex digits without the '0x' prefix, zero-padded to 16 characters.
        return hex(dpid)[2:].zfill(16)
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
468
469
def pid_exists(pid):
    """
    Check whether pid exists in the current process table.

    Returns True when a process with that pid exists (even when owned by
    another user), False otherwise. Non-positive pids are always False.
    """

    if pid <= 0:
        return False
    try:
        # Reap the process if it is a zombie child of ours; best effort.
        # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; os.waitpid only raises OSError (ChildProcessError).
        os.waitpid(pid, os.WNOHANG)
    except OSError:
        pass
    try:
        # Signal 0 performs permission/existence checks without signaling.
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
494
495
def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    delta = difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    # Join the diff, then drop empty lines and normalize line endings.
    return os.linesep.join(s for s in "\n".join(delta).splitlines() if s)
505
506
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."
    # Normalize trailing whitespace, re-add the final newline, keep line ends.
    norm1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(True)
    norm2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(True)
    return get_textdiff(norm1, norm2, title1, title2, **opts)
512
513
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    if isinstance(content, (list, tuple)):
        content = "\n".join(content)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as fde:
        fde.write(content)
        return fde.name
525
526
def normalize_text(text):
    """
    Strips formating spaces/tabs, carriage returns and trailing whitespace.
    """
    # Collapse runs of spaces/tabs, then drop carriage returns.
    text = re.sub(r"\r", "", re.sub(r"[ \t]+", " ", text))
    # Remove whitespace left hanging before newlines.
    text = re.sub(r"[ \t]+\n", "\n", text)
    # Remove whitespace at the end of the text.
    return text.rstrip()
540
541
def is_linux():
    """
    Parses unix name output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """
    return os.uname()[0] == "Linux"
552
553
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
                # BUG FIX: decode output to str; communicate() otherwise
                # returns bytes on Python 3 and the comparison against the
                # str "Error:" below would always be unequal.
                universal_newlines=True,
            )
            iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]

            if iproute2_err != "Error:":
                return True
        except Exception:
            # Missing 'ip' binary or empty stderr -- treat as not capable.
            pass
    return False
577
578
def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    # Already loaded? /proc/modules uses underscores in module names.
    with open("/proc/modules", "r") as modules_file:
        if module.replace("-", "_") in modules_file.read():
            return True
    # Otherwise try to (dry-run) load it; -n only checks availability.
    cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module)
    return os.system(cmd) == 0
593
594
def module_present_freebsd(module, load):
    """FreeBSD stub: modules are assumed to always be available."""
    return True
597
598
def module_present(module, load=True):
    """Dispatch module presence check to the platform-specific helper."""
    if sys.platform.startswith("linux"):
        return module_present_linux(module, load)
    if sys.platform.startswith("freebsd"):
        return module_present_freebsd(module, load)
604
605
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got a invalid version string")

    # Numeric components of each version.
    parts1 = [int(x) for x in v1m.group("whole").split(".")]
    parts2 = [int(x) for x in v2m.group("whole").split(".")]

    # Pad the shorter one with zeros so missing components compare as 0
    # (e.g. "1" == "1.0.0").
    width = max(len(parts1), len(parts2))
    parts1 += [0] * (width - len(parts1))
    parts2 += [0] * (width - len(parts2))

    for a, b in zip(parts1, parts2):
        if a > b:
            return 1
        if a < b:
            return -1
    return 0
660
661
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    """
    Shut (ifaceaction=False) or un-shut (ifaceaction=True) an interface on
    `node` via vtysh, optionally inside VRF `vrf_name`.
    """
    action = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, action
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, action
            )
        )
    node.run(cmd)
678
679
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        cmd = "show ip route"
    else:
        cmd = "show ip route vrf {0}".format(vrf_name)

    # Mask out timestamps so outputs are comparable.
    output = re.sub(
        r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", node.vtysh_cmd(cmd)
    )

    # Drop leading blanks and everything up to (and including) the last
    # line of the codes legend.
    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines.pop(0)
    return "\n".join(lines)
698
699
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """
    if vrf_name is None:
        cmd = "show ipv6 route"
    else:
        cmd = "show ipv6 route vrf {0}".format(vrf_name)
    raw = node.vtysh_cmd(cmd)

    # Mask out timestamp
    canon = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)
    # Mask out the link-local addresses
    canon = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", canon)

    # Drop leading blanks and everything up to (and including) the last
    # line of the codes legend.
    lines = canon.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines.pop(0)

    return "\n".join(lines)
725
726
def proto_name_to_number(protocol):
    """Translate a FRR routing protocol name to its rt_protos number (as str)."""
    translation = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
        "ospf6": "197",
    }
    # default return same as input
    return translation.get(protocol, protocol)
744
745
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip route")).splitlines():
        fields = line.split(" ")
        # First field is the prefix; the rest are keyword/value pairs.
        entry = result[fields[0]] = {}
        for keyword, value in zip(fields, fields[1:]):
            if keyword in ("dev", "via", "metric", "scope"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
785
786
def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(
        node.run("ip route show vrf {0}-cust1".format(node.name))
    ).splitlines()

    result = {}
    for line in output:
        fields = line.split(" ")
        # First field is the prefix; the rest are keyword/value pairs.
        entry = result[fields[0]] = {}
        for keyword, value in zip(fields, fields[1:]):
            if keyword in ("dev", "via", "metric", "scope"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
829
830
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip -6 route")).splitlines():
        fields = line.split(" ")
        # First field is the prefix; the rest are keyword/value pairs.
        entry = result[fields[0]] = {}
        for keyword, value in zip(fields, fields[1:]):
            if keyword in ("dev", "via", "metric", "pref"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
869
870
def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(
        node.run("ip -6 route show vrf {0}-cust1".format(node.name))
    ).splitlines()
    result = {}
    for line in output:
        fields = line.split(" ")
        # First field is the prefix; the rest are keyword/value pairs.
        entry = result[fields[0]] = {}
        for keyword, value in zip(fields, fields[1:]):
            if keyword in ("dev", "via", "metric", "pref"):
                entry[keyword] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
    return result
911
912
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    result = []
    for line in normalize_text(node.run("ip rule")).splitlines():
        fields = line.split(" ")

        # First field is "<pref>:" -- strip the trailing colon.
        rule = {"pref": fields[0][:-1]}
        for keyword, value in zip(fields, fields[1:]):
            if keyword in ("from", "to", "proto", "iif", "fwmark"):
                rule[keyword] = value

        result.append(rule)
    return result
962
963
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    message = (
        "Sleeping for {} seconds".format(amount)
        if reason is None
        else reason + " ({} seconds)".format(amount)
    )
    logger.info(message)
    time.sleep(amount)
974
975
def checkAddressSanitizerError(output, router, component, logdir=""):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        # Report the crash, identify the calling test/procedure by walking
        # the interpreter frames, and archive the ASAN log.
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back
            # BUG FIX: dict_values is not subscriptable on Python 3 --
            # wrap in list() before indexing.
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't found the calling test function
                callingProc = "unknownProc"
            # NOTE: the "AddressSanitzer" filename typo is preserved on
            # purpose -- external tooling may already look for it.
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False
1073
1074
1075 def _sysctl_atleast(commander, variable, min_value):
1076 if isinstance(min_value, tuple):
1077 min_value = list(min_value)
1078 is_list = isinstance(min_value, list)
1079
1080 sval = commander.cmd_raises("sysctl -n " + variable).strip()
1081 if is_list:
1082 cur_val = [int(x) for x in sval.split()]
1083 else:
1084 cur_val = int(sval)
1085
1086 set_value = False
1087 if is_list:
1088 for i, v in enumerate(cur_val):
1089 if v < min_value[i]:
1090 set_value = True
1091 else:
1092 min_value[i] = v
1093 else:
1094 if cur_val < min_value:
1095 set_value = True
1096 if set_value:
1097 if is_list:
1098 valstr = " ".join([str(x) for x in min_value])
1099 else:
1100 valstr = str(min_value)
1101 logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
1102 commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1103
1104
1105 def _sysctl_assure(commander, variable, value):
1106 if isinstance(value, tuple):
1107 value = list(value)
1108 is_list = isinstance(value, list)
1109
1110 sval = commander.cmd_raises("sysctl -n " + variable).strip()
1111 if is_list:
1112 cur_val = [int(x) for x in sval.split()]
1113 else:
1114 cur_val = sval
1115
1116 set_value = False
1117 if is_list:
1118 for i, v in enumerate(cur_val):
1119 if v != value[i]:
1120 set_value = True
1121 else:
1122 value[i] = v
1123 else:
1124 if cur_val != str(value):
1125 set_value = True
1126
1127 if set_value:
1128 if is_list:
1129 valstr = " ".join([str(x) for x in value])
1130 else:
1131 valstr = str(value)
1132 logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
1133 commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1134
1135
def sysctl_atleast(commander, variable, min_value, raises=False):
    """
    Ensure sysctl `variable` is at least `min_value`.

    Creates a scratch Commander when `commander` is None. Failures are
    logged with traceback and re-raised only when `raises` is True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # CONSISTENCY FIX: include the traceback (exc_info) like
        # sysctl_assure does; also drop the unused exception binding.
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
1150
1151
def sysctl_assure(commander, variable, value, raises=False):
    """
    Ensure sysctl `variable` equals `value`.

    Creates a scratch Commander when `commander` is None. Failures are
    logged with traceback and re-raised only when `raises` is True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1167
1168
def rlimit_atleast(rname, min_value, raises=False):
    """Raise the soft limit of resource `rname` to at least `min_value`.

    The hard limit is also raised when it is below `min_value`.  Failures
    are logged as a warning and swallowed unless `raises` is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except (ValueError, OSError):
        # Bug fix: resource.getrlimit/setrlimit raise ValueError or OSError
        # (resource.error), never subprocess.CalledProcessError, so the old
        # handler could never fire and `raises=True` had no effect.
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1183
1184
def fix_netns_limits(ns):
    """Apply the sysctl settings each topotest namespace depends on."""

    # Maximum read and write socket buffer sizes.  Separate list literals
    # on purpose: the atleast helper may update its list argument in place.
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])

    # Disable reverse-path filtering everywhere
    for scope in ("all", "default", "lo"):
        sysctl_assure(ns, "net.ipv4.conf.{}.rp_filter".format(scope), 0)

    # Enable IPv4 forwarding
    for scope in ("all", "default"):
        sysctl_assure(ns, "net.ipv4.conf.{}.forwarding".format(scope), 1)

    # XXX if things fail look here as this wasn't done previously
    for scope in ("all", "default"):
        sysctl_assure(ns, "net.ipv6.conf.{}.forwarding".format(scope), 1)

    # ARP
    for scope in ("default", "all"):
        sysctl_assure(ns, "net.ipv4.conf.{}.arp_announce".format(scope), 2)
        sysctl_assure(ns, "net.ipv4.conf.{}.arp_notify".format(scope), 1)
        # Setting arp_ignore to 1 breaks topotests that rely on lo
        # addresses being proxy arp'd for
        sysctl_assure(ns, "net.ipv4.conf.{}.arp_ignore".format(scope), 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    for family in ("ipv4", "ipv6"):
        sysctl_assure(
            ns, "net.{}.conf.all.ignore_routes_with_linkdown".format(family), 1
        )

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
1227
1228
def fix_host_limits():
    """Increase system limits."""

    # Process and file-descriptor headroom for the whole run
    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    for limit_var in ("fs.file-max", "kernel.pty.max"):
        sysctl_atleast(None, limit_var, 16 * 1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    for buf_var in ("net.core.rmem_max", "net.core.wmem_max"):
        sysctl_atleast(None, buf_var, 16 * 2 ** 20)

    # Garbage Collection Settings for ARP and Neighbors
    for family in ("ipv4", "ipv6"):
        sysctl_atleast(
            None, "net.{}.neigh.default.gc_thresh2".format(family), 4 * 1024
        )
        sysctl_atleast(
            None, "net.{}.neigh.default.gc_thresh3".format(family), 8 * 1024
        )
    # Hold entries for 10 minutes
    for family in ("ipv4", "ipv6"):
        sysctl_assure(
            None,
            "net.{}.neigh.default.base_reachable_time_ms".format(family),
            10 * 60 * 1000,
        )

    # igmp
    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    for family in ("ipv4", "ipv6"):
        sysctl_atleast(None, "net.{}.route.max_size".format(family), 128 * 1024)
1269
1270
def setup_node_tmpdir(logdir, name):
    """Prepare the per-node log directory under `logdir` for node `name`.

    Removes stale log/valgrind/ASAN leftovers from previous runs, then
    recreates `logdir`/`name` with mode 1777 (world-writable + sticky).
    Returns the path of the node's main log file.
    """
    # The wildcards require shell globbing, hence shell=True.
    cleanup_cmd = "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name)
    subprocess.check_call(cleanup_cmd, shell=True)

    # Recreate the per-node directory with permissive mode.
    node_dir = "{}/{}".format(logdir, name)
    subprocess.check_call("mkdir -p {0} && chmod 1777 {0}".format(node_dir), shell=True)

    return "{0}/{1}.log".format(logdir, name)
1284
1285
class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled"

    def __init__(self, name, **params):
        """Create router node `name`.

        Recognized optional `params`: `logdir` (output directory, derived
        from the global run directory when absent) and `logger` (when
        absent a per-node debug logger writing to the node's log file is
        created).  Remaining params go to the base Node.
        """

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.unified_config = None
        # Daemon name -> enabled flag (0/1); loadConf() flips these to 1.
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "pim6d": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        # Daemon name -> extra command line options used at daemon start.
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        # Command prefix a human can use to enter this node's namespaces.
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get("frrdir")
        if self.daemondir is None:
            self.daemondir = self.config_defaults.get("topogen", "frrdir")

        # A missing zebra binary means the install is unusable; fail early.
        zebra_path = os.path.join(self.daemondir, "zebra")
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        """Configure the node, locating the FRR daemon directory.

        When `daemondir` is not supplied the directory is taken from the
        configuration defaults via _config_frr(); otherwise the provided
        path is validated by checking for a zebra binary.
        """
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get("daemondir")
        if self.daemondir is None:
            self.routertype = params.get(
                "routertype", self.config_defaults.get("topogen", "routertype")
            )
            self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, "zebra")
            if not os.path.isfile(zpath):
                raise Exception("No zebra binary found in {}".format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get("routertype") is not None:
                self.routertype = params.get("routertype")

        # Set ownership of config files
        self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))

    def terminate(self):
        """Stop all daemons, tear down the node and open up the log dir."""
        # Stop running FRR daemons
        self.stopRouter()
        super(Router, self).terminate()
        os.system("chmod -R go+rw " + self.logdir)

    # Return a list of (daemon-name, pid) tuples for daemons still running
    # (the previous comment said "count", but a list is returned).
    def listDaemons(self):
        ret = []
        rc, stdout, _ = self.cmd_status(
            "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
        )
        if rc:
            return ret
        for d in stdout.strip().split("\n"):
            pidfile = d.strip()
            try:
                pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
                name = os.path.basename(pidfile[:-4])

                # probably not compatible with bsd.
                rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
                if rc:
                    # Stale pidfile: the process is gone; clean it up.
                    logger.warning(
                        "%s: %s exited leaving pidfile %s (%s)",
                        self.name,
                        name,
                        pidfile,
                        pid,
                    )
                    self.cmd("rm -- " + pidfile)
                else:
                    ret.append((name, pid))
            except (subprocess.CalledProcessError, ValueError):
                pass
        return ret

    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        Sends SIGTERM first, waits up to ~15s, then SIGBUS (to force a
        core) to any survivor.  Returns the collected core/leak error
        text ("" when clean); asserts on errors unless `assertOnError`
        is False or the FRR version predates `minErrorVersion`.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            # Poll up to 30 times at 0.5s for the daemons to exit.
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        # Anything still alive gets SIGBUS so it dumps core for analysis.
        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            # Deliberately-failing assert used to surface `errors` as the
            # pytest failure message.
            assert "Errors found - details follow:" == 0, errors
        return errors

    def removeIPs(self):
        """Flush all IP addresses from every interface of this node."""
        for interface in self.intfNames():
            try:
                self.intf_ip_cmd(interface, "ip address flush " + interface)
            except Exception as ex:
                # Best-effort: log and continue with remaining interfaces.
                logger.error("%s can't remove IPs %s", self, str(ex))
                # pdb.set_trace()
                # assert False, "can't remove IPs %s" % str(ex)

    def checkCapability(self, daemon, param):
        """Return True if the daemon's -h output mentions option `param`.

        Dashes are stripped from `param` before grepping; a None `param`
        always yields True.
        """
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace("-", "")
            output = self.cmd(
                "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
            )
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        """Enabled and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.info(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            if daemon == "frr":
                # "frr" selects a single unified config file for all daemons.
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                # Missing source: start the daemon with an empty config file.
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons

    def runInWindow(self, cmd, title=None):
        """Run `cmd` in a new terminal window (delegates to run_in_window)."""
        return self.run_in_window(cmd, title)

    def startRouter(self, tgen=None):
        """Prepare the node and start all configured FRR daemons.

        Returns "" on success, or a skip/error message string when a
        prerequisite (daemon binary, kernel version, MPLS modules) is
        missing.
        """
        if self.unified_config:
            self.cmd(
                'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )
        else:
            # Disable integrated-vtysh-config
            self.cmd(
                'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )

        self.cmd(
            "chown %s:%svty /etc/%s/vtysh.conf"
            % (self.routertype, self.routertype, self.routertype)
        )
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        # NOTE(review): under Python 3 map() is lazy, so these two calls
        # remove nothing; wrap in list() or use a loop if the cleanup matters.
        map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons["ldpd"] == 1:
            ldpd_path = os.path.join(self.daemondir, "ldpd")
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), "4.5") < 0:
                logger.info("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"
            # Check if have mpls
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info(
                        "LDP/MPLS Tests will be skipped, platform missing module(s)"
                    )
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if not module_present("mpls-router"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-router kernel module)"
                    )
                elif not module_present("mpls-iptunnel"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-iptunnel kernel module)"
                    )
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"

            # Really want to use sysctl_atleast here, but only when MPLS is actually being
            # used
            self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

        shell_routers = g_extra_config["shell"]
        if "all" in shell_routers or self.name in shell_routers:
            self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)

        if self.daemons["eigrpd"] == 1:
            eigrpd_path = os.path.join(self.daemondir, "eigrpd")
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons["bfdd"] == 1:
            bfdd_path = os.path.join(self.daemondir, "bfdd")
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        status = self.startRouterDaemons(tgen=tgen)

        vtysh_routers = g_extra_config["vtysh"]
        if "all" in vtysh_routers or self.name in vtysh_routers:
            self.run_in_window("vtysh", title="vt-%s" % self.name)

        if self.unified_config:
            # Load the single unified config through vtysh.
            self.cmd("vtysh -f /etc/frr/frr.conf")

        return status

    def getStdErr(self, daemon):
        """Return the captured stderr of `daemon` as a string."""
        return self.getLog("err", daemon)

    def getStdOut(self, daemon):
        """Return the captured stdout of `daemon` as a string."""
        return self.getLog("out", daemon)

    def getLog(self, log, daemon):
        """Return the contents of the daemon's `<daemon>.<log>` capture file."""
        return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))

    def startRouterDaemons(self, daemons=None, tgen=None):
        "Starts FRR daemons for this router."

        # Global run options controlling how daemons are launched.
        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        # Launch one daemon, optionally under gdb/valgrind/strace, with
        # stdout/stderr redirected to per-daemon .out/.err files.
        def start_daemon(daemon, extra_opts=None):
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
            else:
                binary = os.path.join(self.daemondir, daemon)

                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    # NOTE(review): this assignment discards the
                    # "ASAN_OPTIONS=" prefix set just above; it looks like
                    # it was intended to be `cmdenv +=`.
                    cmdenv = "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
                    daemon_opts, daemon
                )
            if extra_opts:
                cmdopt += " " + extra_opts

            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            else:
                if daemon != "snmpd":
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start Zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zebra a chance to configure interface addresses that snmpd daemon
            # may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        if daemons is None:
            # Fix Link-Local Addresses on initial startup
            # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
            _, output, _ = self.cmd_status(
                "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
                stderr=subprocess.STDOUT,
            )
            logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        return ""

    def killRouterDaemons(
        self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
    ):
        # Kill Running FRR
        # Daemons(user specified daemon only) using SIGKILL
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        errors = ""
        daemonsNotRunning = []
        if re.search(r"No such file or directory", rundaemons):
            return errors
        for daemon in daemons:
            if rundaemons is not None and daemon in rundaemons:
                numRunning = 0
                dmns = rundaemons.split("\n")
                # Exclude empty string at end of list
                for d in dmns[:-1]:
                    if re.search(r"%s" % daemon, d):
                        daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                        if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                            logger.info(
                                "{}: killing {}".format(
                                    self.name,
                                    os.path.basename(d.rstrip().rsplit(".", 1)[0]),
                                )
                            )
                            self.cmd("kill -9 %s" % daemonpid)
                            if pid_exists(int(daemonpid)):
                                numRunning += 1
                        while wait and numRunning > 0:
                            sleep(
                                2,
                                "{}: waiting for {} daemon to be stopped".format(
                                    self.name, daemon
                                ),
                            )

                            # 2nd round of kill if daemons didn't exit
                            for d in dmns[:-1]:
                                if re.search(r"%s" % daemon, d):
                                    daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                                    if daemonpid.isdigit() and pid_exists(
                                        int(daemonpid)
                                    ):
                                        logger.info(
                                            "{}: killing {}".format(
                                                self.name,
                                                os.path.basename(
                                                    d.rstrip().rsplit(".", 1)[0]
                                                ),
                                            )
                                        )
                                        self.cmd("kill -9 %s" % daemonpid)
                                    if daemonpid.isdigit() and not pid_exists(
                                        int(daemonpid)
                                    ):
                                        numRunning -= 1
                        self.cmd("rm -- {}".format(d.rstrip()))
                if wait:
                    errors = self.checkRouterCores(reportOnce=True)
                    if self.checkRouterVersion("<", minErrorVersion):
                        # ignore errors in old versions
                        errors = ""
                    if assertOnError and len(errors) > 0:
                        # Deliberately-failing assert used to surface
                        # `errors` as the pytest failure message.
                        assert "Errors found - details follow:" == 0, errors
            else:
                daemonsNotRunning.append(daemon)
        if len(daemonsNotRunning) > 0:
            # NOTE(review): the trailing comma makes `errors` a tuple
            # (string, list), not a concatenated string.
            errors = errors + "Daemons are not running", daemonsNotRunning

        return errors

    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Collect crash/leak reports for all enabled daemons.

        Scans for core dumps (running gdb on them), memstats leak output
        and AddressSanitizer errors; returns the accumulated report text
        ("" when clean).  With `reportOnce` a report is only produced the
        first time after reportCores was (re)enabled.
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n ## \1",
                            log,
                        )
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            self.reportCores = False
        return traces

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo they don't run"

        global fatal_error

        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if daemon == "snmpd":
                continue
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""

    def checkRouterVersion(self, cmpop, version):
        """
        Compares router version using operation `cmpop` with `version`.
        Valid `cmpop` values:
        * `>=`: has the same version or greater
        * '>': has greater version
        * '=': has the same version
        * '<': has a lesser version
        * '<=': has the same version or lesser

        Usage example: router.checkRouterVersion('>', '1.0')
        """

        # Make sure we have version information first
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))

        rversion = self.version
        if rversion == None:
            return False

        result = version_cmp(rversion, version)
        if cmpop == ">=":
            return result >= 0
        if cmpop == ">":
            return result > 0
        if cmpop == "=":
            return result == 0
        if cmpop == "<":
            return result < 0
        # NOTE(review): exact duplicate of the branch above; redundant but
        # harmless.
        if cmpop == "<":
            return result < 0
        if cmpop == "<=":
            return result <= 0

    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"

        linklocal = []

        ifaces = self.cmd("ip -6 address")
        # Fix newlines (make them all the same)
        ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            # Match an interface header line, e.g. "2: eth0@if3 <..."
            m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            # Match a link-local inet6 address on the current interface
            m = re.search(
                "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
                line,
            )
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                if ll_per_if_count > 1:
                    # Disambiguate multiple link-locals on one interface.
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal

    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if daemon == "ldpd":
            if version_cmp(platform.release(), "4.5") < 0:
                return False
            if not module_present("mpls-router", load=False):
                return False
            if not module_present("mpls-iptunnel", load=False):
                return False
        return True

    def get_routertype(self):
        "Return the type of Router (frr)"

        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                    leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats: ", " ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
2164
2165
def frr_unicode(s):
    """Convert string to unicode, depending on python version"""
    if sys.version_info[0] <= 2:
        # Python 2: coerce to a unicode object.
        return unicode(s)  # pylint: disable=E0602
    # Python 3 strings are already unicode.
    return s
2172
2173
def is_mapping(o):
    """Return True when *o* is a mapping (dict-like) object."""
    return isinstance(o, Mapping)