]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/lib/topotest.py
aeb83d42904547e999318e3c5c1f5c1153745e03
[mirror_frr.git] / tests / topotests / lib / topotest.py
1 #!/usr/bin/env python
2 # SPDX-License-Identifier: ISC
3
4 #
5 # topotest.py
6 # Library of helper functions for NetDEF Topology Tests
7 #
8 # Copyright (c) 2016 by
9 # Network Device Education Foundation, Inc. ("NetDEF")
10 #
11
12 import configparser
13 import difflib
14 import errno
15 import functools
16 import glob
17 import json
18 import os
19 import platform
20 import re
21 import resource
22 import signal
23 import subprocess
24 import sys
25 import tempfile
26 import time
27 from collections.abc import Mapping
28 from copy import deepcopy
29
30 import lib.topolog as topolog
31 from lib.micronet_compat import Node
32 from lib.topolog import logger
33 from munet.base import Timeout
34
35 from lib import micronet
36
# Global pytest configuration handle; NOTE(review): appears to be assigned
# externally by the test harness at startup (None until then) — confirm
# against the code that sets it.
g_pytest_config = None
38
39
def get_logs_path(rundir):
    """Return the path of the current test's log directory inside `rundir`."""
    return os.path.join(rundir, topolog.get_test_logdir())
43
44
def gdb_core(obj, daemon, corefiles):
    """Run gdb in batch mode against `daemon`'s first core file, dump the
    backtrace to stderr, and return the raw gdb output."""
    # One "-ex <command>" pair per scripted gdb command: list the threads,
    # take a full backtrace, then disassemble while walking up the stack.
    script = [
        "info threads",
        "bt full",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
        "up",
        "disassemble",
    ]
    gdbcmds = []
    for command in script:
        gdbcmds += ["-ex", command]

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
73
74
class json_cmp_result(object):
    "Accumulator for JSON comparison errors, used to build assertion messages."

    def __init__(self):
        # One list entry per individual error line.
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return bool(self.errors)

    def gen_report(self):
        # Report as a list of lines, headline first.
        return ["Generated JSON diff error report:", ""] + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )
98
99
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.

    `d1` is the "got" data and `d2` the "expected" template (see json_cmp()
    for the special notations `d2` may contain).  `path` tracks the current
    position inside the structure for error messages; `acc` is an
    (error_count, error_text) accumulator threaded through the recursion and
    returned.  NOTE: d1/d2 may be mutated (matched elements are deleted), so
    callers pass deep copies.
    """

    def dump_json(v):
        # Pretty-print containers for error messages; quote scalars.
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        # bool must be tested before int/float: bool is an int subclass, so
        # checking Number first would misreport True/False as "Number".
        if isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        # `points` weighs the error; type mismatches count double (see below).
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    # "*" in d2 matches anything; equal scalars match trivially.
    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        # Ordered Array comparison: lengths must match, elements compared
        # pairwise in order.
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        # Unordered Array comparison: every d2 element must match some d1
        # element; matched d1 elements are consumed (deleted).
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        # Remember the least-bad candidate for the report.
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        # Exact Object comparison: key sets must match both ways.
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        # Subset Object comparison: a None value in d2 asserts key absence.
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
264
265
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when every JSON Object key and Array element of d2 has a
    match in d1, i.e. when d2 is a "subset" of d1 ignoring order. Otherwise
    an error report is generated and wrapped in a 'json_cmp_result()'.
    Special parameters and notations for unusual cases:

    * when 'exact' is set to 'True', d1 and d2 are tested for full equality
      (including order within JSON Arrays)
    * a 'null' ('None' in Python) JSON Object value checks that the key is
      absent from d1
    * a '*' JSON Object or Array value checks for presence in d1 without
      comparing the value itself
    * '__ordered__' as the first element of a JSON Array in d2 also checks
      element order against the corresponding Array in d1
    """
    # Deep-copy both sides: the report generator consumes matched elements.
    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if not errors_n:
        return None
    result = json_cmp_result()
    result.add_error(errors)
    return result
296
297
def router_output_cmp(router, cmd, expected):
    """Run `cmd` on `router` and diff the normalized output against `expected`."""
    current = normalize_text(router.vtysh_cmd(cmd))
    return difflines(
        current,
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )
308
309
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compares the parsed result with `data` via json_cmp().
    """
    got = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(got, data, exact)
316
317
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    if type(func) is functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Safety-check: refuse very small wait/count polling budgets.
    assert (
        wait * count >= 5
    ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
        count, wait
    )

    logger.debug(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    tries_left = count
    while tries_left > 0:
        result = func()
        if result == what:
            logger.debug(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - start_time
                )
            )
            return (True, result)
        time.sleep(wait)
        tries_left -= 1

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - start_time)
    )
    return (False, result)
376
377
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    if type(func) is functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    # Safety-check: refuse very small wait/count polling budgets.
    assert (
        wait * count >= 5
    ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
        count, wait
    )

    logger.debug(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    tries_left = count
    while tries_left > 0:
        result = func()
        if isinstance(result, etype):
            # Value check only applies when a non-None expected value is
            # given and the expected type is not NoneType itself.
            if etype == type(None) or avalue == None or result == avalue:
                logger.debug(
                    "'{}' succeeded after {:.2f} seconds".format(
                        func_name, time.time() - start_time
                    )
                )
                return (True, result)
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
        else:
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
        time.sleep(wait)
        tries_left -= 1

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - start_time)
    )
    return (False, result)
442
443
def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compares with `data` contents, retrying once per second for
    `retry_timeout` seconds (10 by default). Returns True on match.
    """
    success, _ = run_and_expect(
        lambda: router_json_cmp(router, cmd, data, exact), None, int(retry_timeout), 1
    )
    return success
455
456
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        # Hex without the "0x" prefix, zero-padded to 16 characters.
        return hex(dpid)[2:].zfill(16)
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
470
471
def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    raw = difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    # Drop empty lines and normalize line endings.
    return os.linesep.join(line for line in "\n".join(raw).splitlines() if line)
481
482
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def _as_lines(text):
        # Normalize to a single trailing newline, then keep line endings.
        return ("\n".join(text.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(_as_lines(text1), _as_lines(text2), title1, title2, **opts)
488
489
def get_file(content):
    """
    Generates a temporary file (in the system temp directory, usually /tmp)
    with `content` and returns the file name. Lists/tuples are joined with
    newlines first.
    """
    if isinstance(content, (list, tuple)):
        content = "\n".join(content)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as handle:
        handle.write(content)
        return handle.name
501
502
def normalize_text(text):
    """
    Strips formatting spaces/tabs, carriage returns and trailing whitespace.
    """
    # Collapse runs of blanks to a single space.
    text = re.sub(r"[ \t]+", " ", text)
    # Drop carriage returns.
    text = text.replace("\r", "")
    # Strip blanks at line ends, then trailing whitespace at the very end.
    text = re.sub(r"[ \t]+\n", "\n", text)
    return text.rstrip()
516
517
def is_linux():
    """
    Checks the kernel name reported by uname.

    Returns True if running on Linux, returns False otherwise.
    """
    return os.uname()[0] == "Linux"
528
529
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
            )
            # communicate() returns bytes here (no text mode requested);
            # decode before comparing — comparing raw bytes against the str
            # "Error:" was always unequal, misreporting capability.
            iproute2_err = (
                subp.communicate()[1]
                .decode("utf-8", "replace")
                .splitlines()[0]
                .split()[0]
            )

            if iproute2_err != "Error:":
                return True
        except Exception:
            # Missing binary or empty stderr: treat as not capable.
            pass
    return False
553
554
def iproute2_is_fdb_get_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling `bridge fdb get` commands to query neigh table resolution.

    Returns True if capability can be detected, returns False otherwise.
    """

    if not is_linux():
        return False
    try:
        proc = subprocess.Popen(
            ["bridge", "fdb", "get", "help"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE,
        )
        # A capable `bridge` prints a "Usage" line on stderr for "get help".
        first_word = proc.communicate()[1].splitlines()[0].split()[0]
        return "Usage" in str(first_word)
    except Exception:
        return False
578
579
def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    with open("/proc/modules", "r") as modules_file:
        # Kernel reports module names with underscores.
        if module.replace("-", "_") in modules_file.read():
            return True
    # "-n" is a dry-run probe when we are not allowed to actually load.
    flag = "" if load else "-n "
    return os.system("/sbin/modprobe {}{}".format(flag, module)) == 0
594
595
def module_present_freebsd(module, load):
    """FreeBSD stub: kernel modules are always reported as present."""
    return True
598
599
def module_present(module, load=True):
    """Platform dispatcher for the kernel-module presence check; returns
    None (unknown) on unsupported platforms."""
    platform_name = sys.platform
    if platform_name.startswith("linux"):
        return module_present_linux(module, load)
    if platform_name.startswith("freebsd"):
        return module_present_freebsd(module, load)
605
606
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got a invalid version string")

    parts1 = [int(p) for p in v1m.group("whole").split(".")]
    parts2 = [int(p) for p in v2m.group("whole").split(".")]

    # Zero-pad the shorter version so that e.g. "1.0" equals "1.0.0".
    width = max(len(parts1), len(parts2))
    parts1 += [0] * (width - len(parts1))
    parts2 += [0] * (width - len(parts2))

    # Lexicographic list comparison yields the -1/0/1 contract.
    return (parts1 > parts2) - (parts1 < parts2)
661
662
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    """Bring `ifacename` up (ifaceaction truthy) or shut it down on `node`
    via a vtysh configuration session, optionally inside `vrf_name`."""
    action = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, action
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, action
            )
        )
    node.run(cmd)
679
680
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ip route")
    else:
        raw = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))

    # Mask out timestamps so the output is stable across runs.
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Skip the legend/header up to and including the offload-failure line.
    lines = masked.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines.pop(0)
    return "\n".join(lines)
699
700
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ipv6 route")
    else:
        raw = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamps so the output is stable across runs.
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)
    # Mask out the link-local addresses.
    masked = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", masked)

    # Skip the legend/header up to and including the offload-failure line.
    lines = masked.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines.pop(0)

    return "\n".join(lines)
726
727
def proto_name_to_number(protocol):
    """Map an FRR protocol name to its rtproto number (as a string);
    unknown names are returned unchanged."""
    translation = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
        "ospf6": "197",
    }
    # Default: return the input unchanged.
    return translation.get(protocol, protocol)
745
746
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    routes = {}
    for line in normalize_text(node.run("ip route")).splitlines():
        words = line.split(" ")
        # The route prefix is always the first column.
        route = routes[words[0]] = {}
        prev = None
        for word in words:
            if prev == "dev":
                route["dev"] = word
            elif prev == "via":
                route["via"] = word
            elif prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(word)
            elif prev == "metric":
                route["metric"] = word
            elif prev == "scope":
                route["scope"] = word
            prev = word

    return routes
786
787
def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip route show vrf {0}-cust1".format(node.name))

    routes = {}
    for line in normalize_text(raw).splitlines():
        words = line.split(" ")
        # The route prefix is always the first column.
        route = routes[words[0]] = {}
        prev = None
        for word in words:
            if prev == "dev":
                route["dev"] = word
            elif prev == "via":
                route["via"] = word
            elif prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(word)
            elif prev == "metric":
                route["metric"] = word
            elif prev == "scope":
                route["scope"] = word
            prev = word

    return routes
830
831
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    routes = {}
    for line in normalize_text(node.run("ip -6 route")).splitlines():
        words = line.split(" ")
        # The route prefix is always the first column.
        route = routes[words[0]] = {}
        prev = None
        for word in words:
            if prev == "dev":
                route["dev"] = word
            elif prev == "via":
                route["via"] = word
            elif prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(word)
            elif prev == "metric":
                route["metric"] = word
            elif prev == "pref":
                route["pref"] = word
            prev = word

    return routes
870
871
def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    raw = node.run("ip -6 route show vrf {0}-cust1".format(node.name))

    routes = {}
    for line in normalize_text(raw).splitlines():
        words = line.split(" ")
        # The route prefix is always the first column.
        route = routes[words[0]] = {}
        prev = None
        for word in words:
            if prev == "dev":
                route["dev"] = word
            elif prev == "via":
                route["via"] = word
            elif prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(word)
            elif prev == "metric":
                route["metric"] = word
            elif prev == "pref":
                route["pref"] = word
            prev = word

    return routes
912
913
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    rules = []
    for line in normalize_text(node.run("ip rule")).splitlines():
        words = line.split(" ")

        rule = {}
        # First column is "<pref>:"; strip the trailing colon.
        rule["pref"] = words[0][:-1]
        prev = None
        for word in words:
            if prev == "from":
                rule["from"] = word
            elif prev == "to":
                rule["to"] = word
            elif prev == "proto":
                rule["proto"] = word
            elif prev == "iif":
                rule["iif"] = word
            elif prev == "fwmark":
                rule["fwmark"] = word
            prev = word

        rules.append(rule)
    return rules
963
964
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    message = (
        "Sleeping for {} seconds".format(amount)
        if reason is None
        else reason + " ({} seconds)".format(amount)
    )
    logger.info(message)
    time.sleep(amount)
975
976
def checkAddressSanitizerError(output, router, component, logdir=""):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        # Report the ASAN error on stderr and append a markdown-formatted
        # record to the shared report file. NOTE(review): the
        # "AddressSanitzer" filename typo is preserved on purpose — other
        # tooling may already depend on it.
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back.
            # dict_values is not subscriptable in Python 3, so materialize
            # the view first (plain .values()[0] raises TypeError).
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't found the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    " "
                    + "\n ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    # Look for an ASAN error report directly in the captured output first.
    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + ".asan." + component + ".*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
                addressSanitizerError = re.search(
                    r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
                )
                if addressSanitizerError:
                    processAddressSanitizerError(
                        addressSanitizerError, asanError, router, component
                    )
                    return True
    return False
1074
1075
1076 def _sysctl_atleast(commander, variable, min_value):
1077 if isinstance(min_value, tuple):
1078 min_value = list(min_value)
1079 is_list = isinstance(min_value, list)
1080
1081 sval = commander.cmd_raises("sysctl -n " + variable).strip()
1082 if is_list:
1083 cur_val = [int(x) for x in sval.split()]
1084 else:
1085 cur_val = int(sval)
1086
1087 set_value = False
1088 if is_list:
1089 for i, v in enumerate(cur_val):
1090 if v < min_value[i]:
1091 set_value = True
1092 else:
1093 min_value[i] = v
1094 else:
1095 if cur_val < min_value:
1096 set_value = True
1097 if set_value:
1098 if is_list:
1099 valstr = " ".join([str(x) for x in min_value])
1100 else:
1101 valstr = str(min_value)
1102 logger.debug("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
1103 commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1104
1105
1106 def _sysctl_assure(commander, variable, value):
1107 if isinstance(value, tuple):
1108 value = list(value)
1109 is_list = isinstance(value, list)
1110
1111 sval = commander.cmd_raises("sysctl -n " + variable).strip()
1112 if is_list:
1113 cur_val = [int(x) for x in sval.split()]
1114 else:
1115 cur_val = sval
1116
1117 set_value = False
1118 if is_list:
1119 for i, v in enumerate(cur_val):
1120 if v != value[i]:
1121 set_value = True
1122 else:
1123 value[i] = v
1124 else:
1125 if cur_val != str(value):
1126 set_value = True
1127
1128 if set_value:
1129 if is_list:
1130 valstr = " ".join([str(x) for x in value])
1131 else:
1132 valstr = str(value)
1133 logger.debug("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
1134 commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
1135
1136
def sysctl_atleast(commander, variable, min_value, raises=False):
    """
    Ensure sysctl `variable` is at least `min_value` via `commander` (a
    host-level commander is created when None). Failures are logged as
    warnings and re-raised only when `raises` is True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError:
        # exc_info=True records the failure cause, matching sysctl_assure();
        # the previously unused exception binding was dropped.
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
1151
1152
def sysctl_assure(commander, variable, value, raises=False):
    """
    Set sysctl `variable` to `value` via `commander` (a host-level commander
    is created when None). Failures are logged as warnings and re-raised
    only when `raises` is True.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        # The unused exception binding was dropped; exc_info=True already
        # records the active exception in the log.
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1168
1169
def rlimit_atleast(rname, min_value, raises=False):
    """Raise the soft limit of rlimit ``rname`` to at least ``min_value``.

    The hard limit is raised along with it when ``min_value`` exceeds it
    (root only).  Errors are logged and re-raised only when ``raises`` is
    True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        # BUG FIX: RLIM_INFINITY compares less than any positive value; an
        # unlimited soft limit is already "at least" min_value and must not
        # be touched (the old code would have *lowered* the hard limit).
        if soft != resource.RLIM_INFINITY and soft < min_value:
            if hard == resource.RLIM_INFINITY or min_value < hard:
                nval = (min_value, hard)
            else:
                nval = (min_value, min_value)
            logger.debug("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    # BUG FIX: resource.getrlimit/setrlimit raise ValueError/OSError, never
    # subprocess.CalledProcessError -- the old handler could never fire, so
    # failures escaped even with raises=False.
    except (ValueError, OSError):
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1184
1185
def fix_netns_limits(ns):
    """Apply the sysctl settings a topotest namespace relies on.

    Raises socket buffer minimums and pins forwarding/ARP/routing behavior
    to the values the tests assume.  ``sysctl_atleast`` only ever raises a
    value; ``sysctl_assure`` sets it exactly.
    """
    # Ordered (setter, variable, value) table; order preserved from the
    # original explicit call sequence.
    settings = [
        # Maximum read and write socket buffer sizes
        (sysctl_atleast, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2**20]),
        (sysctl_atleast, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2**20]),
        (sysctl_assure, "net.ipv4.conf.all.rp_filter", 0),
        (sysctl_assure, "net.ipv4.conf.default.rp_filter", 0),
        (sysctl_assure, "net.ipv4.conf.lo.rp_filter", 0),
        (sysctl_assure, "net.ipv4.conf.all.forwarding", 1),
        (sysctl_assure, "net.ipv4.conf.default.forwarding", 1),
        # XXX if things fail look here as this wasn't done previously
        (sysctl_assure, "net.ipv6.conf.all.forwarding", 1),
        (sysctl_assure, "net.ipv6.conf.default.forwarding", 1),
        # ARP
        (sysctl_assure, "net.ipv4.conf.default.arp_announce", 2),
        (sysctl_assure, "net.ipv4.conf.default.arp_notify", 1),
        # Setting arp_ignore to 1 breaks topotests that rely on lo addresses
        # being proxy arp'd for
        (sysctl_assure, "net.ipv4.conf.default.arp_ignore", 0),
        (sysctl_assure, "net.ipv4.conf.all.arp_announce", 2),
        (sysctl_assure, "net.ipv4.conf.all.arp_notify", 1),
        (sysctl_assure, "net.ipv4.conf.all.arp_ignore", 0),
        (sysctl_assure, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1),
        # Keep ipv6 permanent addresses on an admin down
        (sysctl_assure, "net.ipv6.conf.all.keep_addr_on_down", 1),
    ]
    # Only available on Linux >= 4.20.
    if version_cmp(platform.release(), "4.20") >= 0:
        settings.append((sysctl_assure, "net.ipv6.route.skip_notify_on_dev_down", 1))
    settings += [
        (sysctl_assure, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1),
        (sysctl_assure, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1),
        # igmp
        (sysctl_atleast, "net.ipv4.igmp_max_memberships", 1000),
        # Use neigh information on selection of nexthop for multipath hops
        (sysctl_assure, "net.ipv4.fib_multipath_use_neigh", 1),
    ]

    for setter, variable, value in settings:
        setter(ns, variable, value)
1227
1228
def fix_host_limits():
    """Increase host-wide resource limits and sysctls for test runs."""

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)

    # Ordered (setter, variable, value) table; order preserved from the
    # original explicit call sequence.
    settings = [
        (sysctl_atleast, "fs.file-max", 16 * 1024),
        (sysctl_atleast, "kernel.pty.max", 16 * 1024),
        # Enable coredumps
        # Original on ubuntu 17.x, but apport won't save as in namespace
        # |/usr/share/apport/apport %p %s %c %d %P
        (sysctl_assure, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp"),
        (sysctl_assure, "kernel.core_uses_pid", 1),
        (sysctl_assure, "fs.suid_dumpable", 1),
        # Maximum connection backlog
        (sysctl_atleast, "net.core.netdev_max_backlog", 4 * 1024),
        # Maximum read and write socket buffer sizes
        (sysctl_atleast, "net.core.rmem_max", 16 * 2**20),
        (sysctl_atleast, "net.core.wmem_max", 16 * 2**20),
        # Garbage Collection Settings for ARP and Neighbors
        (sysctl_atleast, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024),
        (sysctl_atleast, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024),
        (sysctl_atleast, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024),
        (sysctl_atleast, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024),
        # Hold entries for 10 minutes
        (sysctl_assure, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000),
        (sysctl_assure, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000),
        # igmp
        (sysctl_assure, "net.ipv4.neigh.default.mcast_solicit", 10),
        # MLD
        (sysctl_atleast, "net.ipv6.mld_max_msf", 512),
        # Increase routing table size to 128K
        (sysctl_atleast, "net.ipv4.route.max_size", 128 * 1024),
        (sysctl_atleast, "net.ipv6.route.max_size", 128 * 1024),
    ]

    # None == run on the host itself rather than in a namespace.
    for setter, variable, value in settings:
        setter(None, variable, value)
1269
1270
def setup_node_tmpdir(logdir, name):
    """Prepare the per-node log directory and return the node's logfile path.

    Removes stale valgrind/ASAN/log artifacts left by a previous run, then
    (re)creates ``logdir/name`` world-writable (mode 1777) so daemons
    running as any user can write logs there.
    """
    # Cleanup old log, valgrind, and core files.
    stale = "rm -rf {0}/{1}.valgrind.* {0}/{1}.asan.* {0}/{1}/".format(logdir, name)
    subprocess.check_call(stale, shell=True)

    # Setup the per node directory.
    node_dir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(node_dir), shell=True
    )
    return "{0}/{1}.log".format(logdir, name)
1285
1286
1287 class Router(Node):
1288 "A Node with IPv4/IPv6 forwarding enabled"
1289
    def __init__(self, name, *posargs, **params):
        """Create a router node named *name*.

        Recognized ``params`` (all optional): ``logdir`` (log destination,
        derived from --rundir when absent) and ``logger`` (set by topogen;
        created here when absent).  Remaining args go to the Node base class.
        """
        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        # Overlay the defaults with values from the repo-level pytest.ini.
        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # daemon-name -> Popen handle for daemons launched under `perf record`.
        self.perf_daemons = {}

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_pytest_config.getoption("--rundir"))

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, *posargs, **params)

        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.unified_config = None
        # Daemon enable flags: 0 == not configured, 1 == should be started.
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "pim6d": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
            "mgmtd": 0,
        }
        # Extra command-line options per daemon.
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        # Command prefix users can use to enter this node's namespaces.
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            # try..else: only log the enter-command when /proc/1/cgroup was readable.
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
1363
1364 def _config_frr(self, **params):
1365 "Configure FRR binaries"
1366 self.daemondir = params.get("frrdir")
1367 if self.daemondir is None:
1368 self.daemondir = self.config_defaults.get("topogen", "frrdir")
1369
1370 zebra_path = os.path.join(self.daemondir, "zebra")
1371 if not os.path.isfile(zebra_path):
1372 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
1373
1374 mgmtd_path = os.path.join(self.daemondir, "mgmtd")
1375 if not os.path.isfile(mgmtd_path):
1376 raise Exception("FRR MGMTD binary doesn't exist at {}".format(mgmtd_path))
1377
1378 # pylint: disable=W0221
1379 # Some params are only meaningful for the parent class.
1380 def config_host(self, **params):
1381 super(Router, self).config_host(**params)
1382
1383 # User did not specify the daemons directory, try to autodetect it.
1384 self.daemondir = params.get("daemondir")
1385 if self.daemondir is None:
1386 self.routertype = params.get(
1387 "routertype", self.config_defaults.get("topogen", "routertype")
1388 )
1389 self._config_frr(**params)
1390 else:
1391 # Test the provided path
1392 zpath = os.path.join(self.daemondir, "zebra")
1393 if not os.path.isfile(zpath):
1394 raise Exception("No zebra binary found in {}".format(zpath))
1395
1396 cpath = os.path.join(self.daemondir, "mgmtd")
1397 if not os.path.isfile(zpath):
1398 raise Exception("No MGMTD binary found in {}".format(cpath))
1399 # Allow user to specify routertype when the path was specified.
1400 if params.get("routertype") is not None:
1401 self.routertype = params.get("routertype")
1402
1403 # Set ownership of config files
1404 self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))
1405
    def terminate(self):
        """Stop all FRR daemons, tear down the node, and open log permissions."""
        # Stop running FRR daemons
        self.stopRouter()
        super(Router, self).terminate()
        # Make the collected logs readable/writable outside the test user.
        os.system("chmod -R go+rw " + self.logdir)
1411
1412 # Return count of running daemons
1413 def listDaemons(self):
1414 ret = []
1415 rc, stdout, _ = self.cmd_status(
1416 "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
1417 )
1418 if rc:
1419 return ret
1420 for d in stdout.strip().split("\n"):
1421 pidfile = d.strip()
1422 try:
1423 pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
1424 name = os.path.basename(pidfile[:-4])
1425
1426 # probably not compatible with bsd.
1427 rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
1428 if rc:
1429 logger.warning(
1430 "%s: %s exited leaving pidfile %s (%s)",
1431 self.name,
1432 name,
1433 pidfile,
1434 pid,
1435 )
1436 self.cmd("rm -- " + pidfile)
1437 else:
1438 ret.append((name, pid))
1439 except (subprocess.CalledProcessError, ValueError):
1440 pass
1441 return ret
1442
    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop the running FRR daemons: SIGTERM, wait, then SIGBUS stragglers.

        Returns the error/backtrace text collected by checkRouterCores (or
        "" when none / version < minErrorVersion).  When assertOnError is
        True, any collected errors trigger an assertion failure instead.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.debug("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.debug(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        # Poll up to 30 * 0.5s for a graceful exit.
        running = self.listDaemons()
        if running:
            for _ in range(0, 30):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        # Anything still alive gets SIGBUS so it dumps core for analysis.
        if running:
            logger.warning(
                "%s: sending SIGBUS to: %s",
                self.name,
                ", ".join([x[0] for x in running]),
            )
            for name, pid in running:
                pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
                logger.info("%s: killing %s", self.name, name)
                self.cmd("kill -SIGBUS %d" % pid)
                self.cmd("rm -- " + pidfile)

            sleep(
                0.5,
                "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name,
            )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            # Deliberately-false assert so pytest shows `errors` as the message.
            assert "Errors found - details follow:" == 0, errors
        return errors
1496
1497 def removeIPs(self):
1498 for interface in self.intfNames():
1499 try:
1500 self.intf_ip_cmd(interface, "ip -4 address flush " + interface)
1501 self.intf_ip_cmd(
1502 interface, "ip -6 address flush " + interface + " scope global"
1503 )
1504 except Exception as ex:
1505 logger.error("%s can't remove IPs %s", self, str(ex))
1506 # breakpoint()
1507 # assert False, "can't remove IPs %s" % str(ex)
1508
1509 def checkCapability(self, daemon, param):
1510 if param is not None:
1511 daemon_path = os.path.join(self.daemondir, daemon)
1512 daemon_search_option = param.replace("-", "")
1513 output = self.cmd(
1514 "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
1515 )
1516 if daemon_search_option not in output:
1517 return False
1518 return True
1519
    def loadConf(self, daemon, source=None, param=None):
        """Enabled and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        if source is None:
            source = f"{daemon}.conf"

        if source:
            head, tail = os.path.split(source)
            if not head and not self.path_exists(tail):
                # Bare filename not found directly; look under the per-router
                # sub-directory of the test script's directory.
                script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
                router_relative = os.path.join(script_dir, self.name, tail)
                if self.path_exists(router_relative):
                    source = router_relative
                    self.logger.debug(
                        "using router relative configuration: {}".format(source)
                    )

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            # "frr" selects unified-config mode instead of a specific daemon.
            if daemon == "frr":
                self.unified_config = 1
            else:
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                # No usable source: create an empty config file (unless a
                # unified config covers this daemon).
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
            else:
                # copy zebra.conf to mgmtd folder, which can be used during startup
                if daemon == "zebra":
                    conf_file_mgmt = "/etc/{}/{}.conf".format(self.routertype, "mgmtd")
                    self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not (self.unified_config or daemon == "frr"):
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["mgmtd"] == 0):
                # Add mgmtd with zebra - if it exists
                mgmtd_path = os.path.join(self.daemondir, "mgmtd")
                if os.path.isfile(mgmtd_path):
                    self.daemons["mgmtd"] = 1
                    self.daemons_options["mgmtd"] = ""
                    # Auto-Started mgmtd has no config, so it will read from zebra config

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                staticd_path = os.path.join(self.daemondir, "staticd")
                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config

        else:
            logger.warning("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons
1592
    def runInWindow(self, cmd, title=None):
        """Backward-compat wrapper around self.run_in_window()."""
        return self.run_in_window(cmd, title)
1595
1596 def startRouter(self, tgen=None):
1597 if self.unified_config:
1598 self.cmd(
1599 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1600 % self.routertype
1601 )
1602 else:
1603 # Disable integrated-vtysh-config
1604 self.cmd(
1605 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1606 % self.routertype
1607 )
1608
1609 self.cmd(
1610 "chown %s:%svty /etc/%s/vtysh.conf"
1611 % (self.routertype, self.routertype, self.routertype)
1612 )
1613 # TODO remove the following lines after all tests are migrated to Topogen.
1614 # Try to find relevant old logfiles in /tmp and delete them
1615 map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
1616 # Remove old core files
1617 map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
1618 # Remove IP addresses from OS first - we have them in zebra.conf
1619 self.removeIPs()
1620 # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
1621 # No error - but return message and skip all the tests
1622 if self.daemons["ldpd"] == 1:
1623 ldpd_path = os.path.join(self.daemondir, "ldpd")
1624 if not os.path.isfile(ldpd_path):
1625 logger.info("LDP Test, but no ldpd compiled or installed")
1626 return "LDP Test, but no ldpd compiled or installed"
1627
1628 if version_cmp(platform.release(), "4.5") < 0:
1629 logger.info("LDP Test need Linux Kernel 4.5 minimum")
1630 return "LDP Test need Linux Kernel 4.5 minimum"
1631 # Check if have mpls
1632 if tgen != None:
1633 self.hasmpls = tgen.hasmpls
1634 if self.hasmpls != True:
1635 logger.info(
1636 "LDP/MPLS Tests will be skipped, platform missing module(s)"
1637 )
1638 else:
1639 # Test for MPLS Kernel modules available
1640 self.hasmpls = False
1641 if not module_present("mpls-router"):
1642 logger.info(
1643 "MPLS tests will not run (missing mpls-router kernel module)"
1644 )
1645 elif not module_present("mpls-iptunnel"):
1646 logger.info(
1647 "MPLS tests will not run (missing mpls-iptunnel kernel module)"
1648 )
1649 else:
1650 self.hasmpls = True
1651 if self.hasmpls != True:
1652 return "LDP/MPLS Tests need mpls kernel modules"
1653
1654 # Really want to use sysctl_atleast here, but only when MPLS is actually being
1655 # used
1656 self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
1657
1658 if g_pytest_config.name_in_option_list(self.name, "--shell"):
1659 self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)
1660
1661 if self.daemons["eigrpd"] == 1:
1662 eigrpd_path = os.path.join(self.daemondir, "eigrpd")
1663 if not os.path.isfile(eigrpd_path):
1664 logger.info("EIGRP Test, but no eigrpd compiled or installed")
1665 return "EIGRP Test, but no eigrpd compiled or installed"
1666
1667 if self.daemons["bfdd"] == 1:
1668 bfdd_path = os.path.join(self.daemondir, "bfdd")
1669 if not os.path.isfile(bfdd_path):
1670 logger.info("BFD Test, but no bfdd compiled or installed")
1671 return "BFD Test, but no bfdd compiled or installed"
1672
1673 status = self.startRouterDaemons(tgen=tgen)
1674
1675 if g_pytest_config.name_in_option_list(self.name, "--vtysh"):
1676 self.run_in_window("vtysh", title="vt-%s" % self.name)
1677
1678 if self.unified_config:
1679 self.cmd("vtysh -f /etc/frr/frr.conf")
1680
1681 return status
1682
    def getStdErr(self, daemon):
        """Return contents of the daemon's stderr capture file (<daemon>.err)."""
        return self.getLog("err", daemon)
1685
    def getStdOut(self, daemon):
        """Return contents of the daemon's stdout capture file (<daemon>.out)."""
        return self.getLog("out", daemon)
1688
1689 def getLog(self, log, daemon):
1690 filename = "{}/{}/{}.{}".format(self.logdir, self.name, daemon, log)
1691 log = ""
1692 with open(filename) as file:
1693 log = file.read()
1694 return log
1695
    def startRouterDaemons(self, daemons=None, tgen=None):
        """Starts FRR daemons for this router.

        When ``daemons`` is given only those are started, otherwise all
        daemons enabled in self.daemons.  mgmtd, zebra, staticd and snmpd are
        started first, in that order; then the rest.  Waits for each
        daemon's pid/vty files to appear and asserts on timeout.  Honors
        the --gdb-*, --perf*, --strace-daemons, --valgrind-* and --logd
        pytest options.  Returns "".
        """

        asan_abort = bool(g_pytest_config.option.asan_abort)
        gdb_breakpoints = g_pytest_config.get_option_list("--gdb-breakpoints")
        gdb_daemons = g_pytest_config.get_option_list("--gdb-daemons")
        gdb_routers = g_pytest_config.get_option_list("--gdb-routers")
        valgrind_extra = bool(g_pytest_config.option.valgrind_extra)
        valgrind_memleaks = bool(g_pytest_config.option.valgrind_memleaks)
        strace_daemons = g_pytest_config.get_option_list("--strace-daemons")

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        if self.version is None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
        logger.info("{}: running version: {}".format(self.name, self.version))

        # Parse --perf "daemon[,router,...]" entries into daemon -> [routers].
        perfds = {}
        perf_options = g_pytest_config.get_option("--perf-options", "-g")
        for perf in g_pytest_config.get_option("--perf", []):
            if "," in perf:
                daemon, routers = perf.split(",", 1)
                perfds[daemon] = routers.split(",")
            else:
                daemon = perf
                perfds[daemon] = ["all"]

        # Parse --logd the same way: daemon -> [routers] to tail logs for.
        logd_options = {}
        for logd in g_pytest_config.get_option("--logd", []):
            if "," in logd:
                daemon, routers = logd.split(",", 1)
                logd_options[daemon] = routers.split(",")
            else:
                daemon = logd
                logd_options[daemon] = ["all"]

        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        tail_log_files = []
        check_daemon_files = []

        def start_daemon(daemon, extra_opts=None):
            """Launch one daemon, honoring gdb/perf/strace/valgrind options."""
            daemon_opts = self.daemons_options.get(daemon, "")

            # get pid and vty filenames and remove the files
            m = re.match(r"(.* |^)-n (\d+)( ?.*|$)", daemon_opts)
            dfname = daemon if not m else "{}-{}".format(daemon, m.group(2))
            runbase = "/var/run/{}/{}".format(self.routertype, dfname)
            # If this is a new system bring-up remove the pid/vty files, otherwise
            # do not since apparently presence of the pidfile impacts BGP GR
            self.cmd_status("rm -f {0}.pid {0}.vty".format(runbase))

            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "{}.pid -x /etc/frr/agentx".format(runbase)
                # check_daemon_files.append(runbase + ".pid")
            else:
                binary = os.path.join(self.daemondir, daemon)
                check_daemon_files.extend([runbase + ".pid", runbase + ".vty"])

                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    cmdenv += "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.asan.{2} ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --command-log-always ".format(daemon_opts)
                cmdopt += "--log file:{}.log --log-level debug".format(daemon)

            if daemon in logd_options:
                logdopt = logd_options[daemon]
                if "all" in logdopt or self.name in logdopt:
                    tail_log_files.append(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    )
            if extra_opts:
                cmdopt += " " + extra_opts

            # gdb launch: selected when this router and daemon match the
            # --gdb-routers/--gdb-daemons filters (empty filter == match all).
            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            elif daemon in perfds and (
                self.name in perfds[daemon] or "all" in perfds[daemon]
            ):
                # perf launch: daemon runs under `perf record`.
                cmdopt += rediropt
                cmd = " ".join(
                    ["perf record {} --".format(perf_options), binary, cmdopt]
                )
                p = self.popen(cmd)
                self.perf_daemons[daemon] = p
                if p.poll() and p.returncode:
                    self.logger.error(
                        '%s: Failed to launch "%s" (%s) with perf using: %s',
                        self,
                        daemon,
                        p.returncode,
                        cmd,
                    )
                else:
                    logger.debug(
                        "%s: %s %s started with perf", self, self.routertype, daemon
                    )
            else:
                # Normal launch: daemonize (-d) except for snmpd.
                if daemon != "snmpd":
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.debug("%s: %s %s started", self, self.routertype, daemon)

        # Start mgmtd first
        if "mgmtd" in daemons_list:
            start_daemon("mgmtd")
            while "mgmtd" in daemons_list:
                daemons_list.remove("mgmtd")

        # Start Zebra after mgmtd
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zerbra a chance to configure interface addresses that snmpd daemon
            # may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        wait_time = 30 if (gdb_routers or gdb_daemons) else 10
        timeout = Timeout(wait_time)
        for remaining in timeout:
            if not check_daemon_files:
                break
            check = check_daemon_files[0]
            if self.path_exists(check):
                check_daemon_files.pop(0)
                continue
            self.logger.debug("Waiting {}s for {} to appear".format(remaining, check))
            time.sleep(0.5)

        if check_daemon_files:
            assert False, "Timeout({}) waiting for {} to appear on {}".format(
                wait_time, check_daemon_files[0], self.name
            )

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        if "frr" in logd_options:
            logdopt = logd_options["frr"]
            if "all" in logdopt or self.name in logdopt:
                tail_log_files.append("{}/{}/frr.log".format(self.logdir, self.name))

        for tailf in tail_log_files:
            self.run_in_window("tail -n10000 -F " + tailf, title=tailf, background=True)

        return ""
1961
1962 def pid_exists(self, pid):
1963 if pid <= 0:
1964 return False
1965 try:
1966 # If we are not using PID namespaces then we will be a parent of the pid,
1967 # otherwise the init process of the PID namespace will have reaped the proc.
1968 os.waitpid(pid, os.WNOHANG)
1969 except Exception:
1970 pass
1971
1972 rc, o, e = self.cmd_status("kill -0 " + str(pid), warn=False)
1973 return rc == 0 or "No such process" not in e
1974
1975 def killRouterDaemons(
1976 self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
1977 ):
1978 # Kill Running FRR
1979 # Daemons(user specified daemon only) using SIGKILL
1980 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1981 errors = ""
1982 daemonsNotRunning = []
1983 if re.search(r"No such file or directory", rundaemons):
1984 return errors
1985 for daemon in daemons:
1986 if rundaemons is not None and daemon in rundaemons:
1987 numRunning = 0
1988 dmns = rundaemons.split("\n")
1989 # Exclude empty string at end of list
1990 for d in dmns[:-1]:
1991 if re.search(r"%s" % daemon, d):
1992 daemonpidfile = d.rstrip()
1993 daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip()
1994 if daemonpid.isdigit() and self.pid_exists(int(daemonpid)):
1995 logger.debug(
1996 "{}: killing {}".format(
1997 self.name,
1998 os.path.basename(daemonpidfile.rsplit(".", 1)[0]),
1999 )
2000 )
2001 self.cmd_status("kill -KILL {}".format(daemonpid))
2002 if self.pid_exists(int(daemonpid)):
2003 numRunning += 1
2004 while wait and numRunning > 0:
2005 sleep(
2006 2,
2007 "{}: waiting for {} daemon to be stopped".format(
2008 self.name, daemon
2009 ),
2010 )
2011
2012 # 2nd round of kill if daemons didn't exit
2013 for d in dmns[:-1]:
2014 if re.search(r"%s" % daemon, d):
2015 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
2016 if daemonpid.isdigit() and self.pid_exists(
2017 int(daemonpid)
2018 ):
2019 logger.info(
2020 "{}: killing {}".format(
2021 self.name,
2022 os.path.basename(
2023 d.rstrip().rsplit(".", 1)[0]
2024 ),
2025 )
2026 )
2027 self.cmd_status(
2028 "kill -KILL {}".format(daemonpid)
2029 )
2030 if daemonpid.isdigit() and not self.pid_exists(
2031 int(daemonpid)
2032 ):
2033 numRunning -= 1
2034 self.cmd("rm -- {}".format(daemonpidfile))
2035 if wait:
2036 errors = self.checkRouterCores(reportOnce=True)
2037 if self.checkRouterVersion("<", minErrorVersion):
2038 # ignore errors in old versions
2039 errors = ""
2040 if assertOnError and len(errors) > 0:
2041 assert "Errors found - details follow:" == 0, errors
2042 else:
2043 daemonsNotRunning.append(daemon)
2044 if len(daemonsNotRunning) > 0:
2045 errors = errors + "Daemons are not running", daemonsNotRunning
2046
2047 return errors
2048
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Scan enabled daemons for core files, memory leaks and ASAN errors.

        Returns the accumulated report text ("" when clean), or None when
        reportOnce is set and a report was already made for this run.
        Backtraces/leak logs are also written to stderr as a side effect.
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    # Extract a gdb backtrace from the first core found.
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    # No core: check stderr for FRR memstats leak output.
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n ## \1",
                            log,
                        )
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            # Suppress duplicate reports when called again with reportOnce.
            self.reportCores = False
        return traces
2102
2103 def checkRouterRunning(self):
2104 "Check if router daemons are running and collect crashinfo they don't run"
2105
2106 global fatal_error
2107
2108 daemonsRunning = self.cmd(
2109 'vtysh -c "show logging" | grep "Logging configuration for"'
2110 )
2111 # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
2112 if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
2113 return "%s: vtysh killed by AddressSanitizer" % (self.name)
2114
2115 for daemon in self.daemons:
2116 if daemon == "snmpd":
2117 continue
2118 if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
2119 sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
2120 if daemon == "staticd":
2121 sys.stderr.write(
2122 "You may have a copy of staticd installed but are attempting to test against\n"
2123 )
2124 sys.stderr.write(
2125 "a version of FRR that does not have staticd, please cleanup the install dir\n"
2126 )
2127
2128 # Look for core file
2129 corefiles = glob.glob(
2130 "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
2131 )
2132 if len(corefiles) > 0:
2133 gdb_core(self, daemon, corefiles)
2134 else:
2135 # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
2136 if os.path.isfile(
2137 "{}/{}/{}.log".format(self.logdir, self.name, daemon)
2138 ):
2139 log_tail = subprocess.check_output(
2140 [
2141 "tail -n20 {}/{}/{}.log 2> /dev/null".format(
2142 self.logdir, self.name, daemon
2143 )
2144 ],
2145 shell=True,
2146 )
2147 sys.stderr.write(
2148 "\nFrom %s %s %s log file:\n"
2149 % (self.routertype, self.name, daemon)
2150 )
2151 sys.stderr.write("%s\n" % log_tail)
2152
2153 # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
2154 if checkAddressSanitizerError(
2155 self.getStdErr(daemon), self.name, daemon, self.logdir
2156 ):
2157 return "%s: Daemon %s not running - killed by AddressSanitizer" % (
2158 self.name,
2159 daemon,
2160 )
2161
2162 return "%s: Daemon %s not running" % (self.name, daemon)
2163 return ""
2164
2165 def checkRouterVersion(self, cmpop, version):
2166 """
2167 Compares router version using operation `cmpop` with `version`.
2168 Valid `cmpop` values:
2169 * `>=`: has the same version or greater
2170 * '>': has greater version
2171 * '=': has the same version
2172 * '<': has a lesser version
2173 * '<=': has the same version or lesser
2174
2175 Usage example: router.checkRouterVersion('>', '1.0')
2176 """
2177
2178 # Make sure we have version information first
2179 if self.version == None:
2180 self.version = self.cmd(
2181 os.path.join(self.daemondir, "bgpd") + " -v"
2182 ).split()[2]
2183 logger.info("{}: running version: {}".format(self.name, self.version))
2184
2185 rversion = self.version
2186 if rversion == None:
2187 return False
2188
2189 result = version_cmp(rversion, version)
2190 if cmpop == ">=":
2191 return result >= 0
2192 if cmpop == ">":
2193 return result > 0
2194 if cmpop == "=":
2195 return result == 0
2196 if cmpop == "<":
2197 return result < 0
2198 if cmpop == "<":
2199 return result < 0
2200 if cmpop == "<=":
2201 return result <= 0
2202
2203 def get_ipv6_linklocal(self):
2204 "Get LinkLocal Addresses from interfaces"
2205
2206 linklocal = []
2207
2208 ifaces = self.cmd("ip -6 address")
2209 # Fix newlines (make them all the same)
2210 ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
2211 interface = ""
2212 ll_per_if_count = 0
2213 for line in ifaces:
2214 m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
2215 if m:
2216 interface = m.group(1)
2217 ll_per_if_count = 0
2218 m = re.search(
2219 "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
2220 line,
2221 )
2222 if m:
2223 local = m.group(1)
2224 ll_per_if_count += 1
2225 if ll_per_if_count > 1:
2226 linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
2227 else:
2228 linklocal += [[interface, local]]
2229 return linklocal
2230
2231 def daemon_available(self, daemon):
2232 "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
2233
2234 daemon_path = os.path.join(self.daemondir, daemon)
2235 if not os.path.isfile(daemon_path):
2236 return False
2237 if daemon == "ldpd":
2238 if version_cmp(platform.release(), "4.5") < 0:
2239 return False
2240 if not module_present("mpls-router", load=False):
2241 return False
2242 if not module_present("mpls-iptunnel", load=False):
2243 return False
2244 return True
2245
2246 def get_routertype(self):
2247 "Return the type of Router (frr)"
2248
2249 return self.routertype
2250
2251 def report_memory_leaks(self, filename_prefix, testscript):
2252 "Report Memory Leaks to file prefixed with given string"
2253
2254 leakfound = False
2255 filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
2256 for daemon in self.daemons:
2257 if self.daemons[daemon] == 1:
2258 log = self.getStdErr(daemon)
2259 if "memstats" in log:
2260 # Found memory leak
2261 logger.warning(
2262 "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
2263 )
2264 if not leakfound:
2265 leakfound = True
2266 # Check if file already exists
2267 fileexists = os.path.isfile(filename)
2268 leakfile = open(filename, "a")
2269 if not fileexists:
2270 # New file - add header
2271 leakfile.write(
2272 "# Memory Leak Detection for topotest %s\n\n"
2273 % testscript
2274 )
2275 leakfile.write("## Router %s\n" % self.name)
2276 leakfile.write("### Process %s\n" % daemon)
2277 log = re.sub("core_handler: ", "", log)
2278 log = re.sub(
2279 r"(showing active allocations in memory group [a-zA-Z0-9]+)",
2280 r"\n#### \1\n",
2281 log,
2282 )
2283 log = re.sub("memstats: ", " ", log)
2284 leakfile.write(log)
2285 leakfile.write("\n")
2286 if leakfound:
2287 leakfile.close()
2288
2289
def frr_unicode(s):
    """Return *s* unchanged.

    Historical Python 2 compatibility shim.  This file requires Python 3
    (it imports ``configparser`` at the top), so the old
    ``unicode(s)`` branch was unreachable dead code referencing a builtin
    that no longer exists; it has been removed.  The function is kept so
    existing callers continue to work.
    """
    return s
2296
2297
def is_mapping(o):
    """Tell whether *o* implements the Mapping protocol (dict-like)."""
    mapping_like = isinstance(o, Mapping)
    return mapping_like