]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/lib/topotest.py
pathd: New SR-TE policy management daemon
[mirror_frr.git] / tests / topotests / lib / topotest.py
1 #!/usr/bin/env python
2
3 #
4 # topotest.py
5 # Library of helper functions for NetDEF Topology Tests
6 #
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 import json
26 import os
27 import errno
28 import re
29 import sys
30 import functools
31 import glob
32 import subprocess
33 import tempfile
34 import platform
35 import difflib
36 import time
37 import signal
38
39 from lib.topolog import logger
40 from copy import deepcopy
41
42 if sys.version_info[0] > 2:
43 import configparser
44 else:
45 import ConfigParser as configparser
46
47 from mininet.topo import Topo
48 from mininet.net import Mininet
49 from mininet.node import Node, OVSSwitch, Host
50 from mininet.log import setLogLevel, info
51 from mininet.cli import CLI
52 from mininet.link import Intf
53
54
def gdb_core(obj, daemon, corefiles):
    """
    Dump a backtrace for a crashed `daemon` using gdb on the first core file.

    Writes a crash banner and the gdb output to stderr and returns the raw
    gdb output (bytes, as produced by subprocess).
    """
    # Walk a few frames up the stack, disassembling at each level, so the
    # report shows context around the crash site.
    commands = ["info threads", "bt full", "disassemble"]
    commands += ["up", "disassemble"] * 5

    gdb_args = []
    for command in commands:
        gdb_args += ["-ex", command]

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdb_args
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace
83
84
class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        # One accumulated error message per list entry (single lines).
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return bool(self.errors)

    def gen_report(self):
        "Return the report as a list of lines, headline first."
        headline = ["Generated JSON diff error report:", ""]
        return headline + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )
108
109
def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.

    Returns an accumulator tuple `(error_count, error_text)`; `(0, "")` means
    the structures match under the comparison rules (d2 as a subset of d1
    unless `exact` is set).  `path` tracks the position inside the structure
    for error messages; `acc` threads the running error count/text through
    the recursion.
    """

    def dump_json(v):
        # Pretty-print containers indented with tabs; quote scalars.
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        # NOTE: bool must be tested before int/float, since `bool` is a
        # subclass of `int` and would otherwise be reported as "Number".
        if isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    # "*" is a wildcard: any d1 value matches; equal scalars also match.
    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        # Ordered comparison: "__ordered__" marker or exact mode requires
        # element-by-element equality in order.
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        # Unordered subset comparison: every d2 element must match some d1
        # element; matched d1 elements are consumed so duplicates count.
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    # Compare deep copies so a recursive call cannot mutate
                    # the elements (the ordered branch deletes markers).
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        # Exact object comparison: key sets must be identical.
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        # Subset object comparison: a None value in d2 asserts key *absence*.
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        # Mismatched JSON types (e.g. Object vs Array) weigh heavier than a
        # plain value mismatch.
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc
274
275
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, e.g. when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. There are special
    parameters and notations explained below which can be used to cover rather unusual
    cases:

    * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
      order within JSON Arrays)
    * using 'null' (or 'None' in Python) as JSON Object value is checking for key
      absence in d1
    * using '*' as JSON Object value or Array value is checking for presence in d1
      without checking the values
    * using '__ordered__' as first element in a JSON Array in d2 will also check the
      order when it is compared to an Array in d1
    """

    # Work on deep copies: the diff generator mutates its inputs
    # (e.g. consumes matched Array elements).
    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if errors_n == 0:
        return None

    result = json_cmp_result()
    result.add_error(errors)
    return result
306
307
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.

    Returns an empty string when they match, otherwise a unified diff.
    """
    current = normalize_text(router.vtysh_cmd(cmd))
    reference = normalize_text(expected)
    return difflines(
        current,
        reference,
        title1="Current output",
        title2="Expected output",
    )
318
319
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    actual = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(actual, data, exact)
326
327
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    # Resolve a printable name even when `func` is a functools.partial.
    if isinstance(func, functools.partial):
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    # Always call `func` at least once so `result` is defined even when the
    # caller passes count <= 0 (previously this raised UnboundLocalError).
    # Sleep only *between* attempts: there is no point waiting after the
    # final try.
    while True:
        result = func()
        if result == what:
            end_time = time.time()
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, end_time - start_time
                )
            )
            return (True, result)
        count -= 1
        if count <= 0:
            break
        time.sleep(wait)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)
376
377
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value (`avalue`; ignored when etype is NoneType).

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    # Resolve a printable name even when `func` is a functools.partial.
    if isinstance(func, functools.partial):
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    # Always call `func` at least once so `result` is defined even when the
    # caller passes count <= 0 (previously this raised UnboundLocalError).
    # Sleep only *between* attempts: there is no point waiting after the
    # final try.
    while True:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
        elif etype is not type(None) and avalue is not None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
        else:
            end_time = time.time()
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, end_time - start_time
                )
            )
            return (True, result)
        count -= 1
        if count <= 0:
            break
        time.sleep(wait)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)
432
433
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        # Hex digits of the integer, left-padded with zeroes to 16 chars.
        hexstr = hex(dpid)[2:]
        return hexstr.rjust(16, "0")
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
447
448
def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    try:
        # Reap the process if it is a zombie child of ours, otherwise the
        # kill(pid, 0) probe below would still report it as alive.
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; waitpid only raises OSError here
        # (ECHILD when `pid` is not our child).
        os.waitpid(pid, os.WNOHANG)
    except OSError:
        pass
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
473
474
def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    raw = "\n".join(
        difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    )
    # Clean up line endings: drop empty lines, join with the OS separator.
    return os.linesep.join(line for line in raw.splitlines() if line)
484
485
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def _as_lines(blob):
        # Normalize line endings, drop trailing blank lines, keep "\n"
        # terminators on each line as unified_diff expects.
        return ("\n".join(blob.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(_as_lines(text1), _as_lines(text2), title1, title2, **opts)
491
492
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as handle:
        handle.write(content)
        return handle.name
502
503
def normalize_text(text):
    """
    Strips formating spaces/tabs, carriage returns and trailing whitespace.
    """
    # Collapse runs of spaces/tabs first, then drop carriage returns
    # (order matters: a "\r" splits a whitespace run in two).
    collapsed = re.sub(r"[ \t]+", " ", text)
    collapsed = collapsed.replace("\r", "")

    # Remove whitespace at the end of each line, then at the end of the text.
    collapsed = re.sub(r"[ \t]+\n", "\n", collapsed)
    return collapsed.rstrip()
517
518
def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    with open("/proc/modules", "r") as modules_file:
        if module.replace("-", "_") in modules_file.read():
            return True
    # "-n" makes modprobe a dry-run (check only, do not actually load).
    flag = "" if load else "-n "
    return os.system("/sbin/modprobe {}{}".format(flag, module)) == 0
533
534
def module_present_freebsd(module, load):
    """FreeBSD stub: always reports `module` as present; `load` is ignored."""
    return True
537
538
def module_present(module, load=True):
    """
    Platform dispatcher for the kernel-module presence check.

    Returns None implicitly on unsupported platforms.
    """
    if sys.platform.startswith("freebsd"):
        return module_present_freebsd(module, load)
    if sys.platform.startswith("linux"):
        return module_present_linux(module, load)
544
545
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    m1 = re.match(vregex, v1)
    m2 = re.match(vregex, v2)
    if m1 is None or m2 is None:
        raise ValueError("got a invalid version string")

    # Numeric components of each version.
    parts1 = [int(p) for p in m1.group("whole").split(".")]
    parts2 = [int(p) for p in m2.group("whole").split(".")]

    # Pad the shorter version with zeroes so missing trailing components
    # compare as equal (e.g. "1.0" == "1.0.0").
    width = max(len(parts1), len(parts2))
    parts1 += [0] * (width - len(parts1))
    parts2 += [0] * (width - len(parts2))

    for a, b in zip(parts1, parts2):
        if a > b:
            return 1
        if a < b:
            return -1
    return 0
600
601
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    """
    Shut (ifaceaction=False) or un-shut (True) interface `ifacename` on
    `node` via vtysh, optionally inside VRF `vrf_name`.
    """
    action = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        iface_ctx = "interface {0}".format(ifacename)
    else:
        iface_ctx = "interface {0} vrf {1}".format(ifacename, vrf_name)
    node.run(
        'vtysh -c "configure terminal" -c "{0}" -c "{1}"'.format(iface_ctx, action)
    )
616
617
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ip route")
    else:
        raw = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))

    # Mask out timestamps so outputs are comparable between runs.
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Drop blank lines and everything up to (and including) the last line
    # of the codes legend ("o - offload failure").
    remaining = masked.splitlines()
    seen_header = False
    while remaining and (not remaining[0].strip() or not seen_header):
        if "o - offload failure" in remaining[0]:
            seen_header = True
        remaining.pop(0)
    return "\n".join(remaining)
636
637
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """

    if vrf_name is None:
        raw = node.vtysh_cmd("show ipv6 route")
    else:
        raw = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Mask out the link-local addresses
    masked = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", masked)

    # Drop blank lines and everything up to (and including) the last line
    # of the codes legend ("o - offload failure").
    remaining = masked.splitlines()
    seen_header = False
    while remaining and (not remaining[0].strip() or not seen_header):
        if "o - offload failure" in remaining[0]:
            seen_header = True
        remaining.pop(0)

    return "\n".join(remaining)
663
664
def proto_name_to_number(protocol):
    """Map a routing protocol name to its 'ip route' numeric string.

    Unknown names are returned unchanged (e.g. 'kernel').
    """
    name_to_number = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
    }
    # default return same as input
    return name_to_number.get(protocol, protocol)
681
682
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    # Attribute keywords whose *following* token is the value we keep.
    tracked = ("dev", "via", "proto", "metric", "scope")
    routes = {}
    for line in normalize_text(node.run("ip route")).splitlines():
        words = line.split(" ")
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            if previous in tracked:
                if previous == "proto":
                    # translate protocol names back to numbers
                    entry[previous] = proto_name_to_number(word)
                else:
                    entry[previous] = word
            previous = word
    return routes
722
723
def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    # Attribute keywords whose *following* token is the value we keep.
    tracked = ("dev", "via", "proto", "metric", "scope")
    routes = {}
    raw = normalize_text(node.run("ip route show vrf {0}-cust1".format(node.name)))
    for line in raw.splitlines():
        words = line.split(" ")
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            if previous in tracked:
                if previous == "proto":
                    # translate protocol names back to numbers
                    entry[previous] = proto_name_to_number(word)
                else:
                    entry[previous] = word
            previous = word
    return routes
766
767
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    # Attribute keywords whose *following* token is the value we keep.
    tracked = ("dev", "via", "proto", "metric", "pref")
    routes = {}
    for line in normalize_text(node.run("ip -6 route")).splitlines():
        words = line.split(" ")
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            if previous in tracked:
                if previous == "proto":
                    # translate protocol names back to numbers
                    entry[previous] = proto_name_to_number(word)
                else:
                    entry[previous] = word
            previous = word
    return routes
806
807
def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    # Attribute keywords whose *following* token is the value we keep.
    tracked = ("dev", "via", "proto", "metric", "pref")
    routes = {}
    raw = normalize_text(node.run("ip -6 route show vrf {0}-cust1".format(node.name)))
    for line in raw.splitlines():
        words = line.split(" ")
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            if previous in tracked:
                if previous == "proto":
                    # translate protocol names back to numbers
                    entry[previous] = proto_name_to_number(word)
                else:
                    entry[previous] = word
            previous = word
    return routes
848
849
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    # Attribute keywords whose *following* token is the value we keep.
    tracked = ("from", "to", "proto", "iif", "fwmark")
    rules = []
    for line in normalize_text(node.run("ip rule")).splitlines():
        words = line.split(" ")

        # First column is the preference; strip its trailing ':'.
        rule = {"pref": words[0][:-1]}
        previous = None
        for word in words:
            if previous in tracked:
                rule[previous] = word
            previous = word

        rules.append(rule)
    return rules
899
900
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        message = "Sleeping for {} seconds".format(amount)
    else:
        message = reason + " ({} seconds)".format(amount)
    logger.info(message)

    time.sleep(amount)
911
912
def checkAddressSanitizerError(output, router, component, logdir=""):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        # Report the crash on stderr and, when the full sanitizer log is
        # embedded in `output`, append a markdown-formatted record to the
        # shared report file.
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find the calling test by walking down the frame chain; could
            # be multiple steps back.
            # NOTE: was `sys._current_frames().values()[0]`, which raises
            # TypeError on Python 3 because dict views are not indexable.
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if callingProc not in (
                    "processAddressSanitizerError",
                    "checkAddressSanitizerError",
                    "checkRouterCores",
                    "stopRouter",
                    "__stop_internal",
                    "stop",
                    "stop_topology",
                    "checkRouterRunning",
                    "check_router_running",
                    "routers_have_failure",
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't find the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        # `logfile` instead of `file` to avoid shadowing the builtin.
        for logfile in glob.glob(filepattern):
            with open(logfile, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False
1004
1005
def addRouter(topo, name):
    "Adding a FRRouter to Topology"

    # Directories that must be private (per-namespace) for each router.
    private_dirs = [
        "/etc/frr",
        "/var/run/frr",
        "/var/log",
    ]
    if sys.platform.startswith("freebsd"):
        return topo.addNode(name, cls=FreeBSDRouter, privateDirs=private_dirs)
    if sys.platform.startswith("linux"):
        return topo.addNode(name, cls=LinuxRouter, privateDirs=private_dirs)
1018
1019
def set_sysctl(node, sysctl, value):
    "Set a sysctl value and return None on success or an error string"
    valuestr = "{}".format(value)
    output = node.cmd("sysctl {0}={1}".format(sysctl, valuestr))

    # Success when sysctl echoes back exactly "<name> = <value>".
    matched = re.search(r"([^ ]+) = ([^\s]+)", output)
    if (
        matched is not None
        and matched.group(1) == sysctl
        and matched.group(2) == valuestr
    ):
        return None
    return output
1035
1036
def assert_sysctl(node, sysctl, value):
    """Set `sysctl` to `value` on `node` and assert that it took effect.

    Relies on set_sysctl() returning None on success; any other return
    (the raw command output) fails the assertion.
    """
    assert set_sysctl(node, sysctl, value) is None
1040
1041
1042 class Router(Node):
1043 "A Node with IPv4/IPv6 forwarding enabled"
1044
    def __init__(self, name, **params):
        """Set up FRR-specific state on top of the mininet Node.

        Reads defaults from ../pytest.ini, derives a logdir from the
        PYTEST_CURRENT_TEST environment variable when none is given, and
        initializes the daemon enable-flags table (all disabled).
        """
        super(Router, self).__init__(name, **params)
        self.logdir = params.get("logdir")

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )
        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        if self.logdir is None:
            cur_test = os.environ["PYTEST_CURRENT_TEST"]
            self.logdir = "/tmp/topotests/" + cur_test[
                cur_test.find("/")+1 : cur_test.find(".py")
            ].replace("/", ".")

        # If the logdir is not created, then create it and set the
        # appropriated permissions.
        if not os.path.isdir(self.logdir):
            os.system("mkdir -p " + self.logdir + "/" + name)
            os.system("chmod -R go+rw /tmp/topotests")
            # Erase logs of previous run
            # NOTE(review): this removes the per-router dir created just
            # above — presumably to guarantee it starts empty; confirm.
            os.system("rm -rf " + self.logdir + "/" + name)

        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        # Daemon name -> enabled flag (0 = disabled); toggled by callers
        # before the router is started.
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            'pathd': 0
        }
        # Extra command-line options per daemon.
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None
1104
    def _config_frr(self, **params):
        "Configure FRR binaries"
        # Daemon directory: explicit `frrdir` param wins; otherwise fall
        # back to the [topogen] section of pytest.ini.
        self.daemondir = params.get("frrdir")
        if self.daemondir is None:
            self.daemondir = self.config_defaults.get("topogen", "frrdir")

        # Sanity check: zebra is mandatory, so its absence means the
        # daemon directory is wrong.
        zebra_path = os.path.join(self.daemondir, "zebra")
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
1114
1115 # pylint: disable=W0221
1116 # Some params are only meaningful for the parent class.
    def config(self, **params):
        """Configure the router node.

        Autodetects the FRR daemon directory when `daemondir` is not given,
        otherwise validates the provided path.  Also enables unlimited core
        dumps and fixes config-file ownership.
        """
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get("daemondir")
        if self.daemondir is None:
            self.routertype = params.get(
                "routertype", self.config_defaults.get("topogen", "routertype")
            )
            self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, "zebra")
            if not os.path.isfile(zpath):
                raise Exception("No zebra binary found in {}".format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get("routertype") is not None:
                self.routertype = params.get("routertype")

        # Unlimited core dumps so crashes can be debugged post-mortem.
        self.cmd("ulimit -c unlimited")
        # Set ownership of config files
        self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))
1139
1140 def terminate(self):
1141 # Stop running FRR daemons
1142 self.stopRouter()
1143
1144 # Disable forwarding
1145 set_sysctl(self, "net.ipv4.ip_forward", 0)
1146 set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
1147 super(Router, self).terminate()
1148 os.system("chmod -R go+rw /tmp/topotests")
1149
1150 # Return count of running daemons
1151 def listDaemons(self):
1152 ret = []
1153 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1154 errors = ""
1155 if re.search(r"No such file or directory", rundaemons):
1156 return 0
1157 if rundaemons is not None:
1158 bet = rundaemons.split("\n")
1159 for d in bet[:-1]:
1160 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1161 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
1162 ret.append(os.path.basename(d.rstrip().rsplit(".", 1)[0]))
1163
1164 return ret
1165
    def stopRouter(self, wait=True, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        First SIGTERMs every pid found under /var/run/<routertype>/; when
        `wait` is True it then polls for the daemons to exit, force-kills
        stragglers, and finally checks for core files / leaks.

        Returns the collected error report ("" when clean). When
        `assertOnError` is set and errors were found on a version
        >= `minErrorVersion`, an assertion is raised instead.
        """
        # Stop Running FRR Daemons
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        errors = ""
        if re.search(r"No such file or directory", rundaemons):
            # No pidfiles at all: nothing is running.
            return errors
        if rundaemons is not None:
            dmns = rundaemons.split("\n")
            # Exclude empty string at end of list
            for d in dmns[:-1]:
                daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                    daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0])
                    logger.info("{}: stopping {}".format(self.name, daemonname))
                    try:
                        os.kill(int(daemonpid), signal.SIGTERM)
                    except OSError as err:
                        if err.errno == errno.ESRCH:
                            # Process already gone but its pidfile remained.
                            logger.error(
                                "{}: {} left a dead pidfile (pid={})".format(
                                    self.name, daemonname, daemonpid
                                )
                            )
                        else:
                            logger.info(
                                "{}: {} could not kill pid {}: {}".format(
                                    self.name, daemonname, daemonpid, str(err)
                                )
                            )

            if not wait:
                return errors

            running = self.listDaemons()

            if running:
                # Quick first re-check before the slower poll loop below.
                sleep(
                    0.1,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join(running)
                    ),
                )
                running = self.listDaemons()

            # Poll up to 20 * 0.5s (~10s) for the daemons to exit cleanly.
            counter = 20
            while counter > 0 and running:
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join(running)
                    ),
                )
                running = self.listDaemons()
                counter -= 1

            if running:
                # 2nd round of kill if daemons didn't exit
                dmns = rundaemons.split("\n")
                # Exclude empty string at end of list
                for d in dmns[:-1]:
                    daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                    if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                        logger.info(
                            "{}: killing {}".format(
                                self.name,
                                os.path.basename(d.rstrip().rsplit(".", 1)[0]),
                            )
                        )
                        self.cmd("kill -7 %s" % daemonpid)
                        self.waitOutput()
                    # Remove the pidfile whether or not the process was live.
                    self.cmd("rm -- {}".format(d.rstrip()))

        if not wait:
            return errors

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and errors is not None and len(errors) > 0:
            # Deliberately-false assert so pytest reports `errors` verbatim.
            assert "Errors found - details follow:" == 0, errors
        return errors
1248
1249 def removeIPs(self):
1250 for interface in self.intfNames():
1251 self.cmd("ip address flush", interface)
1252
1253 def checkCapability(self, daemon, param):
1254 if param is not None:
1255 daemon_path = os.path.join(self.daemondir, daemon)
1256 daemon_search_option = param.replace("-", "")
1257 output = self.cmd(
1258 "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
1259 )
1260 if daemon_search_option not in output:
1261 return False
1262 return True
1263
1264 def loadConf(self, daemon, source=None, param=None):
1265 # print "Daemons before:", self.daemons
1266 if daemon in self.daemons.keys():
1267 self.daemons[daemon] = 1
1268 if param is not None:
1269 self.daemons_options[daemon] = param
1270 if source is None:
1271 self.cmd("touch /etc/%s/%s.conf" % (self.routertype, daemon))
1272 self.waitOutput()
1273 else:
1274 self.cmd("cp %s /etc/%s/%s.conf" % (source, self.routertype, daemon))
1275 self.waitOutput()
1276 self.cmd("chmod 640 /etc/%s/%s.conf" % (self.routertype, daemon))
1277 self.waitOutput()
1278 self.cmd(
1279 "chown %s:%s /etc/%s/%s.conf"
1280 % (self.routertype, self.routertype, self.routertype, daemon)
1281 )
1282 self.waitOutput()
1283 if (daemon == "zebra") and (self.daemons["staticd"] == 0):
1284 # Add staticd with zebra - if it exists
1285 staticd_path = os.path.join(self.daemondir, "staticd")
1286 if os.path.isfile(staticd_path):
1287 self.daemons["staticd"] = 1
1288 self.daemons_options["staticd"] = ""
1289 # Auto-Started staticd has no config, so it will read from zebra config
1290 else:
1291 logger.info("No daemon {} known".format(daemon))
1292 # print "Daemons after:", self.daemons
1293
1294 def startRouter(self, tgen=None):
1295 # Disable integrated-vtysh-config
1296 self.cmd(
1297 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1298 % self.routertype
1299 )
1300 self.cmd(
1301 "chown %s:%svty /etc/%s/vtysh.conf"
1302 % (self.routertype, self.routertype, self.routertype)
1303 )
1304 # TODO remove the following lines after all tests are migrated to Topogen.
1305 # Try to find relevant old logfiles in /tmp and delete them
1306 map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
1307 # Remove old core files
1308 map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
1309 # Remove IP addresses from OS first - we have them in zebra.conf
1310 self.removeIPs()
1311 # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
1312 # No error - but return message and skip all the tests
1313 if self.daemons["ldpd"] == 1:
1314 ldpd_path = os.path.join(self.daemondir, "ldpd")
1315 if not os.path.isfile(ldpd_path):
1316 logger.info("LDP Test, but no ldpd compiled or installed")
1317 return "LDP Test, but no ldpd compiled or installed"
1318
1319 if version_cmp(platform.release(), "4.5") < 0:
1320 logger.info("LDP Test need Linux Kernel 4.5 minimum")
1321 return "LDP Test need Linux Kernel 4.5 minimum"
1322 # Check if have mpls
1323 if tgen != None:
1324 self.hasmpls = tgen.hasmpls
1325 if self.hasmpls != True:
1326 logger.info(
1327 "LDP/MPLS Tests will be skipped, platform missing module(s)"
1328 )
1329 else:
1330 # Test for MPLS Kernel modules available
1331 self.hasmpls = False
1332 if not module_present("mpls-router"):
1333 logger.info(
1334 "MPLS tests will not run (missing mpls-router kernel module)"
1335 )
1336 elif not module_present("mpls-iptunnel"):
1337 logger.info(
1338 "MPLS tests will not run (missing mpls-iptunnel kernel module)"
1339 )
1340 else:
1341 self.hasmpls = True
1342 if self.hasmpls != True:
1343 return "LDP/MPLS Tests need mpls kernel modules"
1344 self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
1345
1346 if self.daemons["eigrpd"] == 1:
1347 eigrpd_path = os.path.join(self.daemondir, "eigrpd")
1348 if not os.path.isfile(eigrpd_path):
1349 logger.info("EIGRP Test, but no eigrpd compiled or installed")
1350 return "EIGRP Test, but no eigrpd compiled or installed"
1351
1352 if self.daemons["bfdd"] == 1:
1353 bfdd_path = os.path.join(self.daemondir, "bfdd")
1354 if not os.path.isfile(bfdd_path):
1355 logger.info("BFD Test, but no bfdd compiled or installed")
1356 return "BFD Test, but no bfdd compiled or installed"
1357
1358 return self.startRouterDaemons()
1359
    def getStdErr(self, daemon):
        """Return the contents of `daemon`'s captured stderr (.err) file."""
        return self.getLog("err", daemon)
1362
    def getStdOut(self, daemon):
        """Return the contents of `daemon`'s captured stdout (.out) file."""
        return self.getLog("out", daemon)
1365
1366 def getLog(self, log, daemon):
1367 return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
1368
1369 def startRouterDaemons(self, daemons=None):
1370 "Starts all FRR daemons for this router."
1371
1372 bundle_data = ""
1373
1374 if os.path.exists("/etc/frr/support_bundle_commands.conf"):
1375 bundle_data = subprocess.check_output(
1376 ["cat /etc/frr/support_bundle_commands.conf"], shell=True
1377 )
1378 self.cmd(
1379 "echo '{}' > /etc/frr/support_bundle_commands.conf".format(bundle_data)
1380 )
1381
1382 # Starts actual daemons without init (ie restart)
1383 # cd to per node directory
1384 self.cmd("install -d {}/{}".format(self.logdir, self.name))
1385 self.cmd("cd {}/{}".format(self.logdir, self.name))
1386 self.cmd("umask 000")
1387
1388 # Re-enable to allow for report per run
1389 self.reportCores = True
1390
1391 # XXX: glue code forward ported from removed function.
1392 if self.version == None:
1393 self.version = self.cmd(
1394 os.path.join(self.daemondir, "bgpd") + " -v"
1395 ).split()[2]
1396 logger.info("{}: running version: {}".format(self.name, self.version))
1397
1398 # If `daemons` was specified then some upper API called us with
1399 # specific daemons, otherwise just use our own configuration.
1400 daemons_list = []
1401 if daemons != None:
1402 daemons_list = daemons
1403 else:
1404 # Append all daemons configured.
1405 for daemon in self.daemons:
1406 if self.daemons[daemon] == 1:
1407 daemons_list.append(daemon)
1408
1409 # Start Zebra first
1410 if "zebra" in daemons_list:
1411 zebra_path = os.path.join(self.daemondir, "zebra")
1412 zebra_option = self.daemons_options["zebra"]
1413 self.cmd(
1414 "ASAN_OPTIONS=log_path=zebra.asan {0} {1} --log file:zebra.log --log-level debug -s 90000000 -d > zebra.out 2> zebra.err".format(
1415 zebra_path, zebra_option, self.logdir, self.name
1416 )
1417 )
1418 logger.debug("{}: {} zebra started".format(self, self.routertype))
1419
1420 # Remove `zebra` so we don't attempt to start it again.
1421 while "zebra" in daemons_list:
1422 daemons_list.remove("zebra")
1423
1424 # Start staticd next if required
1425 if "staticd" in daemons_list:
1426 staticd_path = os.path.join(self.daemondir, "staticd")
1427 staticd_option = self.daemons_options["staticd"]
1428 self.cmd(
1429 "ASAN_OPTIONS=log_path=staticd.asan {0} {1} --log file:staticd.log --log-level debug -d > staticd.out 2> staticd.err".format(
1430 staticd_path, staticd_option, self.logdir, self.name
1431 )
1432 )
1433 logger.debug("{}: {} staticd started".format(self, self.routertype))
1434
1435 # Remove `staticd` so we don't attempt to start it again.
1436 while "staticd" in daemons_list:
1437 daemons_list.remove("staticd")
1438
1439 # Fix Link-Local Addresses
1440 # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
1441 self.cmd(
1442 "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done"
1443 )
1444
1445 # Now start all the other daemons
1446 for daemon in daemons_list:
1447 # Skip disabled daemons and zebra
1448 if self.daemons[daemon] == 0:
1449 continue
1450
1451 daemon_path = os.path.join(self.daemondir, daemon)
1452 self.cmd(
1453 "ASAN_OPTIONS=log_path={2}.asan {0} {1} --log file:{2}.log --log-level debug -d > {2}.out 2> {2}.err".format(
1454 daemon_path, self.daemons_options.get(daemon, ""), daemon
1455 )
1456 )
1457 logger.debug("{}: {} {} started".format(self, self.routertype, daemon))
1458
1459 # Check if daemons are running.
1460 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1461 if re.search(r"No such file or directory", rundaemons):
1462 return "Daemons are not running"
1463
1464 return ""
1465
1466 def killRouterDaemons(
1467 self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
1468 ):
1469 # Kill Running FRR
1470 # Daemons(user specified daemon only) using SIGKILL
1471 rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
1472 errors = ""
1473 daemonsNotRunning = []
1474 if re.search(r"No such file or directory", rundaemons):
1475 return errors
1476 for daemon in daemons:
1477 if rundaemons is not None and daemon in rundaemons:
1478 numRunning = 0
1479 dmns = rundaemons.split("\n")
1480 # Exclude empty string at end of list
1481 for d in dmns[:-1]:
1482 if re.search(r"%s" % daemon, d):
1483 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1484 if daemonpid.isdigit() and pid_exists(int(daemonpid)):
1485 logger.info(
1486 "{}: killing {}".format(
1487 self.name,
1488 os.path.basename(d.rstrip().rsplit(".", 1)[0]),
1489 )
1490 )
1491 self.cmd("kill -9 %s" % daemonpid)
1492 self.waitOutput()
1493 if pid_exists(int(daemonpid)):
1494 numRunning += 1
1495 if wait and numRunning > 0:
1496 sleep(
1497 2,
1498 "{}: waiting for {} daemon to be stopped".format(
1499 self.name, daemon
1500 ),
1501 )
1502
1503 # 2nd round of kill if daemons didn't exit
1504 for d in dmns[:-1]:
1505 if re.search(r"%s" % daemon, d):
1506 daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
1507 if daemonpid.isdigit() and pid_exists(
1508 int(daemonpid)
1509 ):
1510 logger.info(
1511 "{}: killing {}".format(
1512 self.name,
1513 os.path.basename(
1514 d.rstrip().rsplit(".", 1)[0]
1515 ),
1516 )
1517 )
1518 self.cmd("kill -9 %s" % daemonpid)
1519 self.waitOutput()
1520 self.cmd("rm -- {}".format(d.rstrip()))
1521 if wait:
1522 errors = self.checkRouterCores(reportOnce=True)
1523 if self.checkRouterVersion("<", minErrorVersion):
1524 # ignore errors in old versions
1525 errors = ""
1526 if assertOnError and len(errors) > 0:
1527 assert "Errors found - details follow:" == 0, errors
1528 else:
1529 daemonsNotRunning.append(daemon)
1530 if len(daemonsNotRunning) > 0:
1531 errors = errors + "Daemons are not running", daemonsNotRunning
1532
1533 return errors
1534
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Scan every enabled daemon for core dumps, leaks and ASAN errors.

        Returns an accumulated text report ("" when nothing was found; None
        when `reportOnce` suppressed the scan). With `reportOnce`, at most
        one report is produced per run via the self.reportCores gate.
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    # Core present: extract a gdb backtrace for the report.
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    # A "memstats" dump in stderr marks leftover allocations
                    # at daemon exit - treat it as a leak report.
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n ## \1",
                            log,
                        )
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            # Gate future reportOnce scans for this run.
            self.reportCores = False
        return traces
1588
    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo they don't run"

        # NOTE(review): declared global but never assigned in this method;
        # looks like dead code - confirm before removing.
        global fatal_error

        # "show logging" emits one "Logging configuration for <daemon>" line
        # per connected daemon, so its output doubles as a liveness list.
        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            # Substring match against the vtysh output collected above.
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                # Stop at the first dead daemon found.
                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""
1648
1649 def checkRouterVersion(self, cmpop, version):
1650 """
1651 Compares router version using operation `cmpop` with `version`.
1652 Valid `cmpop` values:
1653 * `>=`: has the same version or greater
1654 * '>': has greater version
1655 * '=': has the same version
1656 * '<': has a lesser version
1657 * '<=': has the same version or lesser
1658
1659 Usage example: router.checkRouterVersion('>', '1.0')
1660 """
1661
1662 # Make sure we have version information first
1663 if self.version == None:
1664 self.version = self.cmd(
1665 os.path.join(self.daemondir, "bgpd") + " -v"
1666 ).split()[2]
1667 logger.info("{}: running version: {}".format(self.name, self.version))
1668
1669 rversion = self.version
1670 if rversion == None:
1671 return False
1672
1673 result = version_cmp(rversion, version)
1674 if cmpop == ">=":
1675 return result >= 0
1676 if cmpop == ">":
1677 return result > 0
1678 if cmpop == "=":
1679 return result == 0
1680 if cmpop == "<":
1681 return result < 0
1682 if cmpop == "<":
1683 return result < 0
1684 if cmpop == "<=":
1685 return result <= 0
1686
1687 def get_ipv6_linklocal(self):
1688 "Get LinkLocal Addresses from interfaces"
1689
1690 linklocal = []
1691
1692 ifaces = self.cmd("ip -6 address")
1693 # Fix newlines (make them all the same)
1694 ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
1695 interface = ""
1696 ll_per_if_count = 0
1697 for line in ifaces:
1698 m = re.search("[0-9]+: ([^:@]+)[@if0-9:]+ <", line)
1699 if m:
1700 interface = m.group(1)
1701 ll_per_if_count = 0
1702 m = re.search(
1703 "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
1704 line,
1705 )
1706 if m:
1707 local = m.group(1)
1708 ll_per_if_count += 1
1709 if ll_per_if_count > 1:
1710 linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
1711 else:
1712 linklocal += [[interface, local]]
1713 return linklocal
1714
1715 def daemon_available(self, daemon):
1716 "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
1717
1718 daemon_path = os.path.join(self.daemondir, daemon)
1719 if not os.path.isfile(daemon_path):
1720 return False
1721 if daemon == "ldpd":
1722 if version_cmp(platform.release(), "4.5") < 0:
1723 return False
1724 if not module_present("mpls-router", load=False):
1725 return False
1726 if not module_present("mpls-iptunnel", load=False):
1727 return False
1728 return True
1729
1730 def get_routertype(self):
1731 "Return the type of Router (frr)"
1732
1733 return self.routertype
1734
    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        # The leak report file is opened lazily, on the first daemon whose
        # stderr log contains a "memstats" dump; the header is only written
        # when the file is brand new.
        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                        leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    # Reformat the memstats dump into markdown-ish sections.
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats: ", " ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
1772
1773
class LinuxRouter(Router):
    "A Linux Router Node with IPv4/IPv6 forwarding enabled."

    def __init__(self, name, **params):
        Router.__init__(self, name, **params)

    def config(self, **params):
        """Configure the node: enable forwarding and core-dump support."""
        Router.config(self, **params)
        # Enable IPv4/IPv6 forwarding plus the knobs required to get usable
        # core dumps out of the daemons.
        for knob in (
            "net.ipv4.ip_forward",
            "net.ipv6.conf.all.forwarding",
            "kernel.core_uses_pid",
            "fs.suid_dumpable",
        ):
            assert_sysctl(self, knob, 1)
        # this applies to the kernel not the namespace...
        # original on ubuntu 17.x, but apport won't save as in namespace
        # |/usr/share/apport/apport %p %s %c %d %P
        assert_sysctl(self, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")

    def terminate(self):
        """
        Terminate generic LinuxRouter Mininet instance
        """
        # Forwarding off again before the node is torn down.
        set_sysctl(self, "net.ipv4.ip_forward", 0)
        set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
        Router.terminate(self)
1801
1802
class FreeBSDRouter(Router):
    "A FreeBSD Router Node with IPv4/IPv6 forwarding enabled."

    def __init__(self, name, **params):
        # Bug fix: was `def __init__(eslf, ...)` delegating to
        # `Router.__init__(Self, ...)` - the undefined name `Self` raised
        # NameError as soon as this class was instantiated.
        Router.__init__(self, name, **params)
1808
1809
class LegacySwitch(OVSSwitch):
    "A Legacy Switch without OpenFlow"

    def __init__(self, name, **params):
        # Standalone fail mode makes OVS behave like a plain learning
        # switch, i.e. no OpenFlow controller is involved.
        super(LegacySwitch, self).__init__(name, failMode="standalone", **params)
        self.switchIP = None
1816
1817
def frr_unicode(s):
    """Return *s* as a unicode string on both Python 2 and Python 3.

    Python 3 strings are already unicode, so the value is returned
    unchanged; on Python 2 it is converted via the builtin ``unicode``.
    """
    if sys.version_info[0] <= 2:
        return unicode(s)
    return s