]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/lib/topotest.py
Merge pull request #10763 from donaldsharp/plist_speedup
[mirror_frr.git] / tests / topotests / lib / topotest.py
1 #!/usr/bin/env python
2
3 #
4 # topotest.py
5 # Library of helper functions for NetDEF Topology Tests
6 #
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 import difflib
26 import errno
27 import functools
28 import glob
29 import json
30 import os
31 import pdb
32 import platform
33 import re
34 import resource
35 import signal
36 import subprocess
37 import sys
38 import tempfile
39 import time
40 from copy import deepcopy
41
42 import lib.topolog as topolog
43 from lib.topolog import logger
44
45 if sys.version_info[0] > 2:
46 import configparser
47 from collections.abc import Mapping
48 else:
49 import ConfigParser as configparser
50 from collections import Mapping
51
52 from lib import micronet
53 from lib.micronet_compat import Node
54
# Module-level configuration dictionary shared by the topotest helpers.
# NOTE(review): nothing in this chunk populates it -- it appears to be
# filled in elsewhere (e.g. by the pytest harness); confirm before
# relying on specific keys.
g_extra_config = {}
56
57
def get_logs_path(rundir):
    """Return the current test's log directory joined under *rundir*."""
    return os.path.join(rundir, topolog.get_test_logdir())
61
62
def gdb_core(obj, daemon, corefiles):
    """Extract a backtrace from the first core file of *daemon* with gdb.

    The backtrace is written to stderr and returned to the caller.
    """
    # Batch script: thread listing, full backtrace, then disassembly while
    # walking up the call stack (same sequence as always).
    batch = ["info threads", "bt full"] + ["disassemble", "up"] * 5 + ["disassemble"]
    gdb_args = []
    for command in batch:
        gdb_args += ["-ex", command]

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdb_args
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
91
92
class json_cmp_result(object):
    """Accumulator of JSON comparison errors, for readable assertions."""

    def __init__(self):
        # One list entry per error line.
        self.errors = []

    def add_error(self, error):
        """Append each line of *error* to the collected errors."""
        self.errors.extend(error.splitlines())

    def has_errors(self):
        """Return True when at least one error was recorded."""
        return bool(self.errors)

    def gen_report(self):
        """Return the report as a list of lines, headline first."""
        return ["Generated JSON diff error report:", ""] + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )
116
117
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.

    Returns the accumulator `acc` as a tuple (error_count, error_text).
    `path` tracks the position inside the JSON tree for error messages.
    See json_cmp() for the comparison semantics ('*', None/'null',
    '__ordered__' and `exact`).
    """

    def dump_json(v):
        # Containers are pretty-printed and tab-indented; scalars quoted.
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        # Human-readable JSON type name. bool must be tested before
        # int/float because bool is a subclass of int in Python --
        # otherwise Booleans would be misreported as "Number".
        if isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        elif isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        # Wildcard match or equal scalars: nothing to report.
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        # Ordered array comparison: lengths must match, elements compared
        # position by position.
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        # Unordered array comparison: each d2 element must match some d1
        # element; matched d1 elements are consumed (deleted) so they are
        # not matched twice.
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        # Remember the least-wrong candidate for the report.
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        # Exact object comparison: key sets must be identical.
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        # Subset object comparison: a None value in d2 asserts the key is
        # ABSENT from d1; other d2 keys must be present and match.
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        # Container/scalar (or Array/Object) type mismatch.
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
282
283
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function taking two parsed JSON data structures.

    Returns None when every Object key and Array element of `d2` has a match
    in `d1` (d2 is an order-insensitive "subset" of d1); otherwise returns a
    json_cmp_result() wrapping the error report.  Special notations:

    * `exact=True`: d1 and d2 must be fully equal (Array order included)
    * a 'null'/None Object value in d2 asserts the key is absent from d1
    * a '*' Object/Array value in d2 matches any d1 value
    * '__ordered__' as the first Array element in d2 enforces element order
    """

    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if errors_n == 0:
        return None
    result = json_cmp_result()
    result.add_error(errors)
    return result
314
315
def router_output_cmp(router, cmd, expected):
    """Run *cmd* on *router* via vtysh and diff the output against *expected*."""
    current = normalize_text(router.vtysh_cmd(cmd))
    reference = normalize_text(expected)
    return difflines(
        current,
        reference,
        title1="Current output",
        title2="Expected output",
    )
326
327
def router_json_cmp(router, cmd, data, exact=False):
    """Run JSON-producing *cmd* on *router* and json_cmp() it against *data*."""
    actual = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(actual, data, exact)
334
335
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    # functools.partial objects have no __name__; use the wrapped one's.
    if isinstance(func, functools.partial):
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    # Initialize so a non-positive `count` returns (False, None) instead of
    # raising UnboundLocalError at the final return.
    result = None
    while count > 0:
        result = func()
        if result != what:
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)
384
385
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    # functools.partial objects have no __name__; use the wrapped one's.
    if isinstance(func, functools.partial):
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    # Initialize so a non-positive `count` returns (False, None) instead of
    # raising UnboundLocalError at the final return.
    result = None
    while count > 0:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
            time.sleep(wait)
            count -= 1
            continue

        # Value check only applies when a value was requested and the
        # expected type is not NoneType itself.
        if etype != type(None) and avalue is not None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)
440
441
def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compares it with `data`. Retries once per second, for `retry_timeout`
    seconds by default.
    """

    def test_func():
        # Closure over the call arguments; polled by run_and_expect below.
        return router_json_cmp(router, cmd, data, exact)

    success, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
    return success
453
454
def int2dpid(dpid):
    """Convert an integer to a 16-hex-digit, zero-padded DPID string."""

    try:
        hexstr = hex(dpid)[2:]
        return hexstr.zfill(16)
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
468
469
def pid_exists(pid):
    """Check whether *pid* exists in the current process table."""

    if pid <= 0:
        return False
    try:
        # Reap the process first if it is a zombie child of ours; for
        # non-children this raises ChildProcessError (an OSError), which
        # is harmless.  Narrowed from a bare `except:` that would also
        # have swallowed KeyboardInterrupt/SystemExit.
        os.waitpid(pid, os.WNOHANG)
    except OSError:
        pass
    try:
        # Signal 0 performs error checking only; no signal is delivered.
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
494
495
def get_textdiff(text1, text2, title1="", title2="", **opts):
    """Return an empty string when equal, otherwise a formatted unified diff."""

    delta = difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    # Join the diff, then drop empty lines / stray line endings.
    nonempty = [line for line in "\n".join(delta).splitlines() if line]
    return os.linesep.join(nonempty)
505
506
def difflines(text1, text2, title1="", title2="", **opts):
    """Wrapper for get_textdiff that handles the string-to-lines conversion."""

    def as_lines(text):
        # Normalize trailing whitespace, then split keeping line endings.
        return ("\n".join(text.rstrip().splitlines()) + "\n").splitlines(1)

    return get_textdiff(as_lines(text1), as_lines(text2), title1, title2, **opts)
512
513
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    if isinstance(content, (list, tuple)):
        content = "\n".join(content)
    # delete=False: the caller owns (and must clean up) the file.
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmpfile:
        tmpfile.write(content)
        return tmpfile.name
525
526
def normalize_text(text):
    """
    Strips formatting spaces/tabs, carriage returns and trailing whitespace.
    """
    # Collapse runs of spaces/tabs, then drop carriage returns.
    text = re.sub(r"[ \t]+", " ", text)
    text = text.replace("\r", "")
    # Drop whitespace sitting just before a newline, then trailing blanks.
    text = re.sub(r"[ \t]+\n", "\n", text)
    return text.rstrip()
540
541
def is_linux():
    """
    Parses unix name output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """
    return os.uname()[0] == "Linux"
552
553
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
            )
            stderr = subp.communicate()[1]
            if not isinstance(stderr, str):
                # Python 3 Popen pipes yield bytes unless text mode is
                # requested; without decoding, the comparison against the
                # str "Error:" below would always be unequal, wrongly
                # reporting VRF capability.
                stderr = stderr.decode("utf-8", "replace")
            iproute2_err = stderr.splitlines()[0].split()[0]

            if iproute2_err != "Error:":
                return True
        except Exception:
            # Any parse/exec failure means we could not prove capability.
            pass
    return False
577
578
def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    with open("/proc/modules", "r") as modules_file:
        # Kernel exports module names with underscores.
        if module.replace("-", "_") in modules_file.read():
            return True
    flag = "" if load else "-n "
    return os.system("/sbin/modprobe {}{}".format(flag, module)) == 0
593
594
def module_present_freebsd(module, load):
    """FreeBSD stub: report every kernel module as present."""
    return True
597
598
def module_present(module, load=True):
    """Dispatch to the platform-specific kernel-module presence check."""
    platform_name = sys.platform
    if platform_name.startswith("linux"):
        return module_present_linux(module, load)
    if platform_name.startswith("freebsd"):
        return module_present_freebsd(module, load)
604
605
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    # Match a leading dotted-number sequence; trailing garbage is ignored.
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got a invalid version string")

    # Split values
    v1g = v1m.group("whole").split(".")
    v2g = v2m.group("whole").split(".")

    # Get the longest version string
    vnum = len(v1g)
    if len(v2g) > vnum:
        vnum = len(v2g)

    # Reverse list because we are going to pop the tail
    v1g.reverse()
    v2g.reverse()
    for _ in range(vnum):
        try:
            v1n = int(v1g.pop())
        except IndexError:
            # v1 ran out of components: v1 < v2 only if any remaining v2
            # component is non-zero (e.g. "1.0" < "1.0.1", but
            # "1.0" == "1.0.0").
            while v2g:
                v2n = int(v2g.pop())
                if v2n > 0:
                    return -1
            break

        try:
            v2n = int(v2g.pop())
        except IndexError:
            # v2 ran out of components: v1 > v2 only if the current or any
            # remaining v1 component is non-zero.
            if v1n > 0:
                return 1
            while v1g:
                v1n = int(v1g.pop())
                if v1n > 0:
                    return 1
            break

        # Both components present: first difference decides.
        if v1n > v2n:
            return 1
        if v1n < v2n:
            return -1
    # All compared components equal (possibly after zero-padding).
    return 0
660
661
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    """Shut (ifaceaction=False) or un-shut (True) *ifacename* on *node* via vtysh.

    vrf_name, when given, selects the VRF the interface lives in.
    """
    action = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, action
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, action
            )
        )
    node.run(cmd)
678
679
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ip route")
    else:
        raw = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
    # Mask out the uptime column so outputs stay comparable.
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", output if False else raw)

    lines = output.splitlines()
    # Skip the legend: drop lines until the one past the header marker,
    # also dropping any blank lines on the way.
    idx = 0
    header_found = False
    while idx < len(lines) and (not lines[idx].strip() or not header_found):
        if "o - offload failure" in lines[idx]:
            header_found = True
        idx += 1
    return "\n".join(lines[idx:])
698
699
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """

    if vrf_name is None:
        raw = node.vtysh_cmd("show ipv6 route")
    else:
        raw = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Mask out the link-local addresses
    output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)

    lines = output.splitlines()
    # Skip the legend: drop lines until the one past the header marker,
    # also dropping any blank lines on the way.
    idx = 0
    header_found = False
    while idx < len(lines) and (not lines[idx].strip() or not header_found):
        if "o - offload failure" in lines[idx]:
            header_found = True
        idx += 1

    return "\n".join(lines[idx:])
725
726
def proto_name_to_number(protocol):
    """Translate a routing protocol name to its rtnetlink protocol number.

    Unknown names are returned unchanged.
    """
    protocol_numbers = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
    }
    return protocol_numbers.get(protocol, protocol)
743
744
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    lines = normalize_text(node.run("ip route")).splitlines()
    result = {}
    keywords = ("dev", "via", "proto", "metric", "scope")
    for line in lines:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        # Walk consecutive (keyword, value) token pairs.
        for key, value in zip(columns, columns[1:]):
            if key not in keywords:
                continue
            if key == "proto":
                # translate protocol names back to numbers
                route[key] = proto_name_to_number(value)
            else:
                route[key] = value
    return result
784
785
def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    lines = normalize_text(
        node.run("ip route show vrf {0}-cust1".format(node.name))
    ).splitlines()

    result = {}
    keywords = ("dev", "via", "proto", "metric", "scope")
    for line in lines:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        # Walk consecutive (keyword, value) token pairs.
        for key, value in zip(columns, columns[1:]):
            if key not in keywords:
                continue
            if key == "proto":
                # translate protocol names back to numbers
                route[key] = proto_name_to_number(value)
            else:
                route[key] = value
    return result
828
829
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    lines = normalize_text(node.run("ip -6 route")).splitlines()
    result = {}
    keywords = ("dev", "via", "proto", "metric", "pref")
    for line in lines:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        # Walk consecutive (keyword, value) token pairs.
        for key, value in zip(columns, columns[1:]):
            if key not in keywords:
                continue
            if key == "proto":
                # translate protocol names back to numbers
                route[key] = proto_name_to_number(value)
            else:
                route[key] = value
    return result
868
869
def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    lines = normalize_text(
        node.run("ip -6 route show vrf {0}-cust1".format(node.name))
    ).splitlines()
    result = {}
    keywords = ("dev", "via", "proto", "metric", "pref")
    for line in lines:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        # Walk consecutive (keyword, value) token pairs.
        for key, value in zip(columns, columns[1:]):
            if key not in keywords:
                continue
            if key == "proto":
                # translate protocol names back to numbers
                route[key] = proto_name_to_number(value)
            else:
                route[key] = value
    return result
910
911
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    lines = normalize_text(node.run("ip rule")).splitlines()
    result = []
    keywords = ("from", "to", "proto", "iif", "fwmark")
    for line in lines:
        columns = line.split(" ")

        # First column is the preference, terminated by ':'.
        rule = {"pref": columns[0][:-1]}
        # Walk consecutive (keyword, value) token pairs.
        for key, value in zip(columns, columns[1:]):
            if key in keywords:
                rule[key] = value

        result.append(rule)
    return result
961
962
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    message = (
        "Sleeping for {} seconds".format(amount)
        if reason is None
        else reason + " ({} seconds)".format(amount)
    )
    logger.info(message)

    time.sleep(amount)
973
974
def checkAddressSanitizerError(output, router, component, logdir=""):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        # Report the error on stderr and append a markdown-formatted record
        # to /tmp/AddressSanitzer.txt (filename typo kept for compatibility).
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back.
            # list() is required: on Python 3 dict.values() returns a view,
            # which is not subscriptable and made this raise TypeError.
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't found the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False
1072
1073
def _sysctl_atleast(commander, variable, min_value):
    """Raise sysctl *variable* to at least *min_value* (scalar or per-field list)."""
    if isinstance(min_value, tuple):
        min_value = list(min_value)
    is_list = isinstance(min_value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    if is_list:
        cur_val = [int(x) for x in sval.split()]
    else:
        cur_val = int(sval)

    set_value = False
    if is_list:
        for i, v in enumerate(cur_val):
            if v < min_value[i]:
                set_value = True
            else:
                # Keep the already-sufficient current value in the target.
                min_value[i] = v
    elif cur_val < min_value:
        set_value = True

    if set_value:
        if is_list:
            valstr = " ".join(str(x) for x in min_value)
        else:
            valstr = str(min_value)
        logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1102
1103
def _sysctl_assure(commander, variable, value):
    """Set sysctl *variable* to exactly *value* (scalar or per-field list) when it differs."""
    if isinstance(value, tuple):
        value = list(value)
    is_list = isinstance(value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    # Scalars are compared as strings; lists field-by-field as ints.
    cur_val = [int(x) for x in sval.split()] if is_list else sval

    set_value = False
    if is_list:
        for i, v in enumerate(cur_val):
            if v != value[i]:
                set_value = True
            else:
                value[i] = v
    elif cur_val != str(value):
        set_value = True

    if set_value:
        if is_list:
            valstr = " ".join(str(x) for x in value)
        else:
            valstr = str(value)
        logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1133
1134
def sysctl_atleast(commander, variable, min_value, raises=False):
    """Best-effort wrapper around _sysctl_atleast.

    Uses a throwaway topotest Commander when *commander* is None.  Failures
    are logged and re-raised only when *raises* is True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # exc_info=True added for parity with sysctl_assure, which logged
        # the traceback while this function silently dropped it.
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
1149
1150
def sysctl_assure(commander, variable, value, raises=False):
    """Best-effort wrapper around _sysctl_assure.

    Uses a throwaway topotest Commander when *commander* is None.  Failures
    are logged (with traceback) and re-raised only when *raises* is True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1166
1167
def rlimit_atleast(rname, min_value, raises=False):
    """Ensure the soft limit of rlimit `rname` is at least `min_value`.

    The hard limit is raised as well when it is below `min_value`.  Failures
    are logged with a traceback and swallowed unless `raises` is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except (ValueError, OSError):
        # resource.getrlimit/setrlimit raise ValueError or OSError; the
        # previously-caught subprocess.CalledProcessError can never occur
        # here, which made this handler (and `raises`) dead code.
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1182
1183
def fix_netns_limits(ns):
    """Apply the kernel tunables topotests need inside network namespace `ns`."""

    # Maximum read and write socket buffer sizes
    for buf_var in ("net.ipv4.tcp_rmem", "net.ipv4.tcp_wmem"):
        sysctl_atleast(ns, buf_var, [10 * 1024, 87380, 16 * 2 ** 20])

    # Disable reverse-path filtering everywhere
    for scope in ("all", "default", "lo"):
        sysctl_assure(ns, "net.ipv4.conf.%s.rp_filter" % scope, 0)

    # Enable IPv4 forwarding
    for scope in ("all", "default"):
        sysctl_assure(ns, "net.ipv4.conf.%s.forwarding" % scope, 1)

    # XXX if things fail look here as this wasn't done previously
    for scope in ("all", "default"):
        sysctl_assure(ns, "net.ipv6.conf.%s.forwarding" % scope, 1)

    # ARP
    for scope in ("default", "all"):
        sysctl_assure(ns, "net.ipv4.conf.%s.arp_announce" % scope, 2)
        sysctl_assure(ns, "net.ipv4.conf.%s.arp_notify" % scope, 1)
        # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
        sysctl_assure(ns, "net.ipv4.conf.%s.arp_ignore" % scope, 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    for family in ("ipv4", "ipv6"):
        sysctl_assure(ns, "net.%s.conf.all.ignore_routes_with_linkdown" % family, 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
1226
1227
def fix_host_limits():
    """Increase system limits."""

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)

    # Ordered table of (setter, sysctl variable, value); applied in sequence.
    settings = (
        (sysctl_atleast, "fs.file-max", 16 * 1024),
        (sysctl_atleast, "kernel.pty.max", 16 * 1024),
        # Enable coredumps
        # Original on ubuntu 17.x, but apport won't save as in namespace
        # |/usr/share/apport/apport %p %s %c %d %P
        (sysctl_assure, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp"),
        (sysctl_assure, "kernel.core_uses_pid", 1),
        (sysctl_assure, "fs.suid_dumpable", 1),
        # Maximum connection backlog
        (sysctl_atleast, "net.core.netdev_max_backlog", 4 * 1024),
        # Maximum read and write socket buffer sizes
        (sysctl_atleast, "net.core.rmem_max", 16 * 2 ** 20),
        (sysctl_atleast, "net.core.wmem_max", 16 * 2 ** 20),
        # Garbage Collection Settings for ARP and Neighbors
        (sysctl_atleast, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024),
        (sysctl_atleast, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024),
        (sysctl_atleast, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024),
        (sysctl_atleast, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024),
        # Hold entries for 10 minutes
        (sysctl_assure, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000),
        (sysctl_assure, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000),
        # igmp
        (sysctl_assure, "net.ipv4.neigh.default.mcast_solicit", 10),
        # MLD
        (sysctl_atleast, "net.ipv6.mld_max_msf", 512),
        # Increase routing table size to 128K
        (sysctl_atleast, "net.ipv4.route.max_size", 128 * 1024),
        (sysctl_atleast, "net.ipv6.route.max_size", 128 * 1024),
    )
    for set_fn, variable, value in settings:
        set_fn(None, variable, value)
1268
1269
def setup_node_tmpdir(logdir, name):
    """Prepare the per-node directory under `logdir` for node `name`.

    Removes stale log, valgrind, and core artifacts from earlier runs,
    (re)creates the node directory world-writable, and returns the path of
    the node's logfile.
    """
    # Cleanup old log, valgrind, and core files.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name), shell=True
    )

    # Setup the per node directory.
    node_dir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(node_dir), shell=True
    )
    return "{0}/{1}.log".format(logdir, name)
1283
1284
class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled"

    def __init__(self, name, **params):
        """Create a router node named `name`.

        Recognized `params` beyond those of Node: "logdir" (defaults to the
        run-wide log path) and "logger" (created here if topogen has not
        already supplied one).
        """

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        # per-router runtime state; daemons maps daemon name -> 0/1 enabled
        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.unified_config = None
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        # command prefix a user can paste to enter this node's namespaces
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get("frrdir")
        if self.daemondir is None:
            self.daemondir = self.config_defaults.get("topogen", "frrdir")

        zebra_path = os.path.join(self.daemondir, "zebra")
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        """Configure the router; locates the FRR daemon binaries.

        Uses `params["daemondir"]` when given (validated by checking for a
        zebra binary there), otherwise autodetects via _config_frr().
        """
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get("daemondir")
        if self.daemondir is None:
            self.routertype = params.get(
                "routertype", self.config_defaults.get("topogen", "routertype")
            )
            self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, "zebra")
            if not os.path.isfile(zpath):
                raise Exception("No zebra binary found in {}".format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get("routertype") is not None:
                self.routertype = params.get("routertype")

        # Set ownership of config files
        self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))

    def terminate(self):
        """Stop all FRR daemons, tear down the node, open up the log tree."""
        # Stop running FRR daemons
        self.stopRouter()
        super(Router, self).terminate()
        # allow the (possibly non-root) harness to read/remove the logs
        os.system("chmod -R go+rw " + self.logdir)

    # Return list of (daemon-name, pid) tuples for daemons whose pidfile
    # refers to a live process; stale pidfiles are removed along the way.
    def listDaemons(self):
        ret = []
        rc, stdout, _ = self.cmd_status(
            "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
        )
        if rc:
            return ret
        for d in stdout.strip().split("\n"):
            pidfile = d.strip()
            try:
                pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
                name = os.path.basename(pidfile[:-4])

                # probably not compatible with bsd.
                rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
                if rc:
                    logger.warning(
                        "%s: %s exited leaving pidfile %s (%s)",
                        self.name,
                        name,
                        pidfile,
                        pid,
                    )
                    self.cmd("rm -- " + pidfile)
                else:
                    ret.append((name, pid))
            except (subprocess.CalledProcessError, ValueError):
                pass
        return ret

    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop running FRR daemons: SIGTERM, wait up to 15s, then SIGBUS.

        SIGBUS (rather than SIGKILL) makes lingering daemons dump core so
        checkRouterCores() can report them.  Returns collected error text
        ("" when clean); asserts on errors unless `assertOnError` is False
        or the router runs a version older than `minErrorVersion`.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            # poll up to 30 * 0.5s for the daemons to exit on their own
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            # deliberately-false assert so pytest shows `errors` as the message
            assert "Errors found - details follow:" == 0, errors
        return errors

    def removeIPs(self):
        """Flush all IP addresses from every interface of this router."""
        for interface in self.intfNames():
            try:
                self.intf_ip_cmd(interface, "ip address flush " + interface)
            except Exception as ex:
                logger.error("%s can't remove IPs %s", self, str(ex))
                # pdb.set_trace()
                # assert False, "can't remove IPs %s" % str(ex)

    def checkCapability(self, daemon, param):
        """Return True if `daemon`'s help output mentions option `param`.

        The leading dashes of `param` are stripped before grepping the
        daemon's `-h` output.  A None `param` trivially succeeds.
        """
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace("-", "")
            output = self.cmd(
                "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
            )
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        """Enable and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.info(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            if daemon == "frr":
                # "frr" selects a single unified frr.conf instead of per-daemon files
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons

    def runInWindow(self, cmd, title=None):
        """Run `cmd` in a new terminal window (delegates to run_in_window)."""
        return self.run_in_window(cmd, title)

    def startRouter(self, tgen=None):
        """Prepare the node and start all configured FRR daemons.

        Returns "" on success or a skip-reason string (e.g. missing daemon
        binary or kernel support) without raising.
        """
        if self.unified_config:
            self.cmd(
                'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )
        else:
            # Disable integrated-vtysh-config
            self.cmd(
                'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
                % self.routertype
            )

        self.cmd(
            "chown %s:%svty /etc/%s/vtysh.conf"
            % (self.routertype, self.routertype, self.routertype)
        )
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        # NOTE(review): on Python 3 `map` is lazy, so these two removals
        # never actually execute — confirm intent before relying on them.
        map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons["ldpd"] == 1:
            ldpd_path = os.path.join(self.daemondir, "ldpd")
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), "4.5") < 0:
                logger.info("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"
            # Check if have mpls
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info(
                        "LDP/MPLS Tests will be skipped, platform missing module(s)"
                    )
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if not module_present("mpls-router"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-router kernel module)"
                    )
                elif not module_present("mpls-iptunnel"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-iptunnel kernel module)"
                    )
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"

            # Really want to use sysctl_atleast here, but only when MPLS is actually being
            # used
            self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

        shell_routers = g_extra_config["shell"]
        if "all" in shell_routers or self.name in shell_routers:
            self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)

        if self.daemons["eigrpd"] == 1:
            eigrpd_path = os.path.join(self.daemondir, "eigrpd")
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons["bfdd"] == 1:
            bfdd_path = os.path.join(self.daemondir, "bfdd")
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        status = self.startRouterDaemons(tgen=tgen)

        vtysh_routers = g_extra_config["vtysh"]
        if "all" in vtysh_routers or self.name in vtysh_routers:
            self.run_in_window("vtysh", title="vt-%s" % self.name)

        if self.unified_config:
            self.cmd("vtysh -f /etc/frr/frr.conf")

        return status

    def getStdErr(self, daemon):
        """Return the captured stderr of `daemon` as a string."""
        return self.getLog("err", daemon)

    def getStdOut(self, daemon):
        """Return the captured stdout of `daemon` as a string."""
        return self.getLog("out", daemon)

    def getLog(self, log, daemon):
        """Return the contents of `daemon`'s `<daemon>.<log>` capture file."""
        return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))

    def startRouterDaemons(self, daemons=None, tgen=None):
        "Starts FRR daemons for this router."

        # global run options controlling instrumentation of the daemons
        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        def start_daemon(daemon, extra_opts=None):
            # Launch one daemon, optionally wrapped in valgrind/strace or
            # run interactively under gdb in a separate window.
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
            else:
                binary = os.path.join(self.daemondir, daemon)

                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    cmdenv = "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
                    daemon_opts, daemon
                )
                if extra_opts:
                    cmdopt += " " + extra_opts

            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    # keep snmpd in the foreground so gdb keeps control
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            else:
                if daemon != "snmpd":
                    # daemonize FRR daemons; snmpd daemonizes on its own
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start Zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zebra a chance to configure interface addresses that snmpd daemon
            # may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        if daemons is None:
            # Fix Link-Local Addresses on initial startup
            # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
            _, output, _ = self.cmd_status(
                "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
                stderr=subprocess.STDOUT,
            )
            logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        return ""

    def killRouterDaemons(
        self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
    ):
        # Kill Running FRR
        # Daemons(user specified daemon only) using SIGKILL
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        errors = ""
        daemonsNotRunning = []
        if re.search(r"No such file or directory", rundaemons):
            return errors
        for daemon in daemons:
            if rundaemons is not None and daemon in rundaemons:
                numRunning = 0
                dmns = rundaemons.split("\n")
                # Exclude empty string at end of list
                for d in dmns[:-1]:
                    if re.search(r"%s" % daemon, d):
                        daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                        if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                            logger.info(
                                "{}: killing {}".format(
                                    self.name,
                                    os.path.basename(d.rstrip().rsplit(".", 1)[0]),
                                )
                            )
                            self.cmd("kill -9 %s" % daemonpid)
                            if pid_exists(int(daemonpid)):
                                numRunning += 1
                        while wait and numRunning > 0:
                            sleep(
                                2,
                                "{}: waiting for {} daemon to be stopped".format(
                                    self.name, daemon
                                ),
                            )

                            # 2nd round of kill if daemons didn't exit
                            for d in dmns[:-1]:
                                if re.search(r"%s" % daemon, d):
                                    daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                                    if daemonpid.isdigit() and pid_exists(
                                        int(daemonpid)
                                    ):
                                        logger.info(
                                            "{}: killing {}".format(
                                                self.name,
                                                os.path.basename(
                                                    d.rstrip().rsplit(".", 1)[0]
                                                ),
                                            )
                                        )
                                        self.cmd("kill -9 %s" % daemonpid)
                                    if daemonpid.isdigit() and not pid_exists(
                                        int(daemonpid)
                                    ):
                                        numRunning -= 1
                        self.cmd("rm -- {}".format(d.rstrip()))
                if wait:
                    errors = self.checkRouterCores(reportOnce=True)
                    if self.checkRouterVersion("<", minErrorVersion):
                        # ignore errors in old versions
                        errors = ""
                    if assertOnError and len(errors) > 0:
                        # deliberately-false assert so pytest shows `errors`
                        assert "Errors found - details follow:" == 0, errors
            else:
                daemonsNotRunning.append(daemon)
        if len(daemonsNotRunning) > 0:
            # NOTE(review): the comma makes this a tuple, not string
            # concatenation — callers receive a tuple here; confirm intent.
            errors = errors + "Daemons are not running", daemonsNotRunning

        return errors

    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Collect crash/leak/ASan reports for all enabled daemons.

        Returns the accumulated report text ("" when clean).  With
        `reportOnce` the check is skipped after the first report was made.
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n  ## \1",
                            log,
                        )
                        log = re.sub("memstats:  ", "    ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            self.reportCores = False
        return traces

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo if they don't run"

        global fatal_error

        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if daemon == "snmpd":
                continue
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""

    def checkRouterVersion(self, cmpop, version):
        """
        Compares router version using operation `cmpop` with `version`.
        Valid `cmpop` values:
        * `>=`: has the same version or greater
        * '>': has greater version
        * '=': has the same version
        * '<': has a lesser version
        * '<=': has the same version or lesser

        Usage example: router.checkRouterVersion('>', '1.0')
        """

        # Make sure we have version information first
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))

        rversion = self.version
        if rversion == None:
            return False

        result = version_cmp(rversion, version)
        if cmpop == ">=":
            return result >= 0
        if cmpop == ">":
            return result > 0
        if cmpop == "=":
            return result == 0
        if cmpop == "<":
            return result < 0
        # NOTE(review): duplicate of the "<" branch above — dead code.
        if cmpop == "<":
            return result < 0
        if cmpop == "<=":
            return result <= 0

    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"

        linklocal = []

        ifaces = self.cmd("ip -6 address")
        # Fix newlines (make them all the same)
        ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            # interface header lines look like "2: eth0@if3: <...>"
            m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            m = re.search(
                "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
                line,
            )
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                # disambiguate when an interface has several link-locals
                if ll_per_if_count > 1:
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal

    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if daemon == "ldpd":
            if version_cmp(platform.release(), "4.5") < 0:
                return False
            if not module_present("mpls-router", load=False):
                return False
            if not module_present("mpls-iptunnel", load=False):
                return False
        return True

    def get_routertype(self):
        "Return the type of Router (frr)"

        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                        leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats:  ", "    ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
2162
2163
def frr_unicode(s):
    """Return `s` as unicode text, bridging the python 2/3 difference."""
    if sys.version_info[0] <= 2:
        return unicode(s)  # pylint: disable=E0602
    return s
2170
2171
def is_mapping(o):
    """Return True when `o` implements the dict-like Mapping ABC."""
    return isinstance(o, Mapping)