5 # Library of helper functions for NetDEF Topology Tests
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
40 from copy
import deepcopy
42 import lib
.topolog
as topolog
43 from lib
.topolog
import logger
45 if sys
.version_info
[0] > 2:
47 from collections
.abc
import Mapping
49 import ConfigParser
as configparser
50 from collections
import Mapping
52 from lib
import micronet
53 from lib
.micronet_compat
import Node
def get_logs_path(rundir):
    """Return the per-test log directory path rooted at `rundir`."""
    logdir = topolog.get_test_logdir()
    return os.path.join(rundir, logdir)
63 def gdb_core(obj
, daemon
, corefiles
):
79 gdbcmds
= [["-ex", i
.strip()] for i
in gdbcmds
.strip().split("\n")]
80 gdbcmds
= [item
for sl
in gdbcmds
for item
in sl
]
82 daemon_path
= os
.path
.join(obj
.daemondir
, daemon
)
83 backtrace
= subprocess
.check_output(
84 ["gdb", daemon_path
, corefiles
[0], "--batch"] + gdbcmds
87 "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj
.name
, daemon
)
89 sys
.stderr
.write("%s" % backtrace
)
93 class json_cmp_result(object):
94 "json_cmp result class for better assertion messages"
99 def add_error(self
, error
):
100 "Append error message to the result"
101 for line
in error
.splitlines():
102 self
.errors
.append(line
)
104 def has_errors(self
):
105 "Returns True if there were errors, otherwise False."
106 return len(self
.errors
) > 0
108 def gen_report(self
):
109 headline
= ["Generated JSON diff error report:", ""]
110 return headline
+ self
.errors
114 "Generated JSON diff error report:\n\n\n" + "\n".join(self
.errors
) + "\n\n"
118 def gen_json_diff_report(d1
, d2
, exact
=False, path
="> $", acc
=(0, "")):
120 Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.
124 if isinstance(v
, (dict, list)):
125 return "\t" + "\t".join(
126 json
.dumps(v
, indent
=4, separators
=(",", ": ")).splitlines(True)
129 return "'{}'".format(v
)
132 if isinstance(v
, (list, tuple)):
134 elif isinstance(v
, dict):
136 elif isinstance(v
, (int, float)):
138 elif isinstance(v
, bool):
140 elif isinstance(v
, str):
145 def get_errors(other_acc
):
148 def get_errors_n(other_acc
):
151 def add_error(acc
, msg
, points
=1):
152 return (acc
[0] + points
, acc
[1] + "{}: {}\n".format(path
, msg
))
154 def merge_errors(acc
, other_acc
):
155 return (acc
[0] + other_acc
[0], acc
[1] + other_acc
[1])
158 return "{}[{}]".format(path
, idx
)
161 return "{}->{}".format(path
, key
)
163 def has_errors(other_acc
):
164 return other_acc
[0] > 0
167 not isinstance(d1
, (list, dict))
168 and not isinstance(d2
, (list, dict))
173 not isinstance(d1
, (list, dict))
174 and not isinstance(d2
, (list, dict))
179 "d1 has element with value '{}' but in d2 it has value '{}'".format(d1
, d2
),
183 and isinstance(d2
, list)
184 and ((len(d2
) > 0 and d2
[0] == "__ordered__") or exact
)
188 if len(d1
) != len(d2
):
191 "d1 has Array of length {} but in d2 it is of length {}".format(
196 for idx
, v1
, v2
in zip(range(0, len(d1
)), d1
, d2
):
198 acc
, gen_json_diff_report(v1
, v2
, exact
=exact
, path
=add_idx(idx
))
200 elif isinstance(d1
, list) and isinstance(d2
, list):
201 if len(d1
) < len(d2
):
204 "d1 has Array of length {} but in d2 it is of length {}".format(
209 for idx2
, v2
in zip(range(0, len(d2
)), d2
):
213 for idx1
, v1
in zip(range(0, len(d1
)), d1
):
214 tmp_v1
= deepcopy(v1
)
215 tmp_v2
= deepcopy(v2
)
216 tmp_diff
= gen_json_diff_report(tmp_v1
, tmp_v2
, path
=add_idx(idx1
))
217 if not has_errors(tmp_diff
):
221 elif not closest_diff
or get_errors_n(tmp_diff
) < get_errors_n(
224 closest_diff
= tmp_diff
226 if not found_match
and isinstance(v2
, (list, dict)):
227 sub_error
= "\n\n\t{}".format(
228 "\t".join(get_errors(closest_diff
).splitlines(True))
233 "d2 has the following element at index {} which is not present in d1: "
234 + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
235 ).format(idx2
, dump_json(v2
), closest_idx
, sub_error
),
237 if not found_match
and not isinstance(v2
, (list, dict)):
240 "d2 has the following element at index {} which is not present in d1: {}".format(
244 elif isinstance(d1
, dict) and isinstance(d2
, dict) and exact
:
245 invalid_keys_d1
= [k
for k
in d1
.keys() if k
not in d2
.keys()]
246 invalid_keys_d2
= [k
for k
in d2
.keys() if k
not in d1
.keys()]
247 for k
in invalid_keys_d1
:
248 acc
= add_error(acc
, "d1 has key '{}' which is not present in d2".format(k
))
249 for k
in invalid_keys_d2
:
250 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
251 valid_keys_intersection
= [k
for k
in d1
.keys() if k
in d2
.keys()]
252 for k
in valid_keys_intersection
:
254 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
256 elif isinstance(d1
, dict) and isinstance(d2
, dict):
257 none_keys
= [k
for k
, v
in d2
.items() if v
== None]
258 none_keys_present
= [k
for k
in d1
.keys() if k
in none_keys
]
259 for k
in none_keys_present
:
261 acc
, "d1 has key '{}' which is not supposed to be present".format(k
)
263 keys
= [k
for k
, v
in d2
.items() if v
!= None]
264 invalid_keys_intersection
= [k
for k
in keys
if k
not in d1
.keys()]
265 for k
in invalid_keys_intersection
:
266 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
267 valid_keys_intersection
= [k
for k
in keys
if k
in d1
.keys()]
268 for k
in valid_keys_intersection
:
270 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
275 "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
276 json_type(d1
), json_type(d2
)
284 def json_cmp(d1
, d2
, exact
=False):
286 JSON compare function. Receives two parameters:
287 * `d1`: parsed JSON data structure
288 * `d2`: parsed JSON data structure
290 Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
291 in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
292 error report is generated and wrapped in a 'json_cmp_result()'. There are special
293 parameters and notations explained below which can be used to cover rather unusual
296 * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
297 order within JSON Arrays)
298 * using 'null' (or 'None' in Python) as JSON Object value is checking for key
300 * using '*' as JSON Object value or Array value is checking for presence in d1
301 without checking the values
302 * using '__ordered__' as first element in a JSON Array in d2 will also check the
303 order when it is compared to an Array in d1
306 (errors_n
, errors
) = gen_json_diff_report(deepcopy(d1
), deepcopy(d2
), exact
=exact
)
309 result
= json_cmp_result()
310 result
.add_error(errors
)
316 def router_output_cmp(router
, cmd
, expected
):
318 Runs `cmd` in router and compares the output with `expected`.
321 normalize_text(router
.vtysh_cmd(cmd
)),
322 normalize_text(expected
),
323 title1
="Current output",
324 title2
="Expected output",
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    # Parse the vtysh output as JSON, then delegate the comparison.
    parsed = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(parsed, data, exact)
336 def run_and_expect(func
, what
, count
=20, wait
=3):
338 Run `func` and compare the result with `what`. Do it for `count` times
339 waiting `wait` seconds between tries. By default it tries 20 times with
340 3 seconds delay between tries.
342 Returns (True, func-return) on success or
343 (False, func-return) on failure.
347 Helper functions to use with this function:
351 start_time
= time
.time()
352 func_name
= "<unknown>"
353 if func
.__class
__ == functools
.partial
:
354 func_name
= func
.func
.__name
__
356 func_name
= func
.__name
__
358 # Just a safety-check to avoid running topotests with very
359 # small wait/count arguments.
360 wait_time
= wait
* count
364 ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
369 "'{}' polling started (interval {} secs, maximum {} tries)".format(
370 func_name
, wait
, count
381 end_time
= time
.time()
383 "'{}' succeeded after {:.2f} seconds".format(
384 func_name
, end_time
- start_time
387 return (True, result
)
389 end_time
= time
.time()
391 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
393 return (False, result
)
396 def run_and_expect_type(func
, etype
, count
=20, wait
=3, avalue
=None):
398 Run `func` and compare the result with `etype`. Do it for `count` times
399 waiting `wait` seconds between tries. By default it tries 20 times with
400 3 seconds delay between tries.
402 This function is used when you want to test the return type and,
403 optionally, the return value.
405 Returns (True, func-return) on success or
406 (False, func-return) on failure.
408 start_time
= time
.time()
409 func_name
= "<unknown>"
410 if func
.__class
__ == functools
.partial
:
411 func_name
= func
.func
.__name
__
413 func_name
= func
.__name
__
415 # Just a safety-check to avoid running topotests with very
416 # small wait/count arguments.
417 wait_time
= wait
* count
421 ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
426 "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
427 func_name
, wait
, int(wait
* count
)
433 if not isinstance(result
, etype
):
435 "Expected result type '{}' got '{}' instead".format(etype
, type(result
))
441 if etype
!= type(None) and avalue
!= None and result
!= avalue
:
442 logger
.debug("Expected value '{}' got '{}' instead".format(avalue
, result
))
447 end_time
= time
.time()
449 "'{}' succeeded after {:.2f} seconds".format(
450 func_name
, end_time
- start_time
453 return (True, result
)
455 end_time
= time
.time()
457 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
459 return (False, result
)
462 def router_json_cmp_retry(router
, cmd
, data
, exact
=False, retry_timeout
=10.0):
464 Runs `cmd` that returns JSON data (normally the command ends with 'json')
465 and compare with `data` contents. Retry by default for 10 seconds
469 return router_json_cmp(router
, cmd
, data
, exact
)
471 ok
, _
= run_and_expect(test_func
, None, int(retry_timeout
), 1)
476 "Converting Integer to DPID"
480 dpid
= "0" * (16 - len(dpid
)) + dpid
484 "Unable to derive default datapath ID - "
485 "please either specify a dpid or use a "
486 "canonical switch name such as s23."
491 "Check whether pid exists in the current process table."
496 os
.waitpid(pid
, os
.WNOHANG
)
501 except OSError as err
:
502 if err
.errno
== errno
.ESRCH
:
503 # ESRCH == No such process
505 elif err
.errno
== errno
.EPERM
:
506 # EPERM clearly means there's a process to deny access to
509 # According to "man 2 kill" possible error values are
510 # (EINVAL, EPERM, ESRCH)
516 def get_textdiff(text1
, text2
, title1
="", title2
="", **opts
):
517 "Returns empty string if same or formatted diff"
520 difflib
.unified_diff(text1
, text2
, fromfile
=title1
, tofile
=title2
, **opts
)
522 # Clean up line endings
523 diff
= os
.linesep
.join([s
for s
in diff
.splitlines() if s
])
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def _as_lines(text):
        # Strip trailing whitespace/newlines, re-terminate, and split while
        # keeping line endings (as unified_diff expects).
        return ("\n".join(text.rstrip().splitlines()) + "\n").splitlines(1)

    return get_textdiff(_as_lines(text1), _as_lines(text2), title1, title2, **opts)
534 def get_file(content
):
536 Generates a temporary file in '/tmp' with `content` and returns the file name.
538 if isinstance(content
, list) or isinstance(content
, tuple):
539 content
= "\n".join(content
)
540 fde
= tempfile
.NamedTemporaryFile(mode
="w", delete
=False)
547 def normalize_text(text
):
549 Strips formating spaces/tabs, carriage returns and trailing whitespace.
551 text
= re
.sub(r
"[ \t]+", " ", text
)
552 text
= re
.sub(r
"\r", "", text
)
554 # Remove whitespace in the middle of text.
555 text
= re
.sub(r
"[ \t]+\n", "\n", text
)
556 # Remove whitespace at the end of the text.
564 Parses unix name output to check if running on GNU/Linux.
566 Returns True if running on Linux, returns False otherwise.
569 if os
.uname()[0] == "Linux":
574 def iproute2_is_vrf_capable():
576 Checks if the iproute2 version installed on the system is capable of
577 handling VRFs by interpreting the output of the 'ip' utility found in PATH.
579 Returns True if capability can be detected, returns False otherwise.
584 subp
= subprocess
.Popen(
585 ["ip", "route", "show", "vrf"],
586 stdout
=subprocess
.PIPE
,
587 stderr
=subprocess
.PIPE
,
588 stdin
=subprocess
.PIPE
,
590 iproute2_err
= subp
.communicate()[1].splitlines()[0].split()[0]
592 if iproute2_err
!= "Error:":
599 def module_present_linux(module
, load
):
601 Returns whether `module` is present.
603 If `load` is true, it will try to load it via modprobe.
605 with
open("/proc/modules", "r") as modules_file
:
606 if module
.replace("-", "_") in modules_file
.read():
608 cmd
= "/sbin/modprobe {}{}".format("" if load
else "-n ", module
)
609 if os
.system(cmd
) != 0:
615 def module_present_freebsd(module
, load
):
def module_present(module, load=True):
    """Dispatch the `module` presence check to the current platform's helper."""
    platform_name = sys.platform
    if platform_name.startswith("linux"):
        return module_present_linux(module, load)
    if platform_name.startswith("freebsd"):
        return module_present_freebsd(module, load)
626 def version_cmp(v1
, v2
):
628 Compare two version strings and returns:
630 * `-1`: if `v1` is less than `v2`
631 * `0`: if `v1` is equal to `v2`
632 * `1`: if `v1` is greater than `v2`
634 Raises `ValueError` if versions are not well formated.
636 vregex
= r
"(?P<whole>\d+(\.(\d+))*)"
637 v1m
= re
.match(vregex
, v1
)
638 v2m
= re
.match(vregex
, v2
)
639 if v1m
is None or v2m
is None:
640 raise ValueError("got a invalid version string")
643 v1g
= v1m
.group("whole").split(".")
644 v2g
= v2m
.group("whole").split(".")
646 # Get the longest version string
651 # Reverse list because we are going to pop the tail
654 for _
in range(vnum
):
682 def interface_set_status(node
, ifacename
, ifaceaction
=False, vrf_name
=None):
684 str_ifaceaction
= "no shutdown"
686 str_ifaceaction
= "shutdown"
688 cmd
= 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
689 ifacename
, str_ifaceaction
693 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
694 ifacename
, vrf_name
, str_ifaceaction
700 def ip4_route_zebra(node
, vrf_name
=None):
702 Gets an output of 'show ip route' command. It can be used
703 with comparing the output to a reference
706 tmp
= node
.vtysh_cmd("show ip route")
708 tmp
= node
.vtysh_cmd("show ip route vrf {0}".format(vrf_name
))
709 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
711 lines
= output
.splitlines()
713 while lines
and (not lines
[0].strip() or not header_found
):
714 if "o - offload failure" in lines
[0]:
717 return "\n".join(lines
)
720 def ip6_route_zebra(node
, vrf_name
=None):
722 Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
723 canonicalizes it by eliding link-locals.
727 tmp
= node
.vtysh_cmd("show ipv6 route")
729 tmp
= node
.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name
))
732 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
734 # Mask out the link-local addresses
735 output
= re
.sub(r
"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output
)
737 lines
= output
.splitlines()
739 while lines
and (not lines
[0].strip() or not header_found
):
740 if "o - offload failure" in lines
[0]:
744 return "\n".join(lines
)
747 def proto_name_to_number(protocol
):
763 ) # default return same as input
768 Gets a structured return of the command 'ip route'. It can be used in
769 conjunction with json_cmp() to provide accurate assert explanations.
784 output
= normalize_text(node
.run("ip route")).splitlines()
787 columns
= line
.split(" ")
788 route
= result
[columns
[0]] = {}
790 for column
in columns
:
792 route
["dev"] = column
794 route
["via"] = column
796 # translate protocol names back to numbers
797 route
["proto"] = proto_name_to_number(column
)
799 route
["metric"] = column
801 route
["scope"] = column
807 def ip4_vrf_route(node
):
809 Gets a structured return of the command 'ip route show vrf {0}-cust1'.
810 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
825 output
= normalize_text(
826 node
.run("ip route show vrf {0}-cust1".format(node
.name
))
831 columns
= line
.split(" ")
832 route
= result
[columns
[0]] = {}
834 for column
in columns
:
836 route
["dev"] = column
838 route
["via"] = column
840 # translate protocol names back to numbers
841 route
["proto"] = proto_name_to_number(column
)
843 route
["metric"] = column
845 route
["scope"] = column
853 Gets a structured return of the command 'ip -6 route'. It can be used in
854 conjunction with json_cmp() to provide accurate assert explanations.
868 output
= normalize_text(node
.run("ip -6 route")).splitlines()
871 columns
= line
.split(" ")
872 route
= result
[columns
[0]] = {}
874 for column
in columns
:
876 route
["dev"] = column
878 route
["via"] = column
880 # translate protocol names back to numbers
881 route
["proto"] = proto_name_to_number(column
)
883 route
["metric"] = column
885 route
["pref"] = column
891 def ip6_vrf_route(node
):
893 Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
894 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
908 output
= normalize_text(
909 node
.run("ip -6 route show vrf {0}-cust1".format(node
.name
))
913 columns
= line
.split(" ")
914 route
= result
[columns
[0]] = {}
916 for column
in columns
:
918 route
["dev"] = column
920 route
["via"] = column
922 # translate protocol names back to numbers
923 route
["proto"] = proto_name_to_number(column
)
925 route
["metric"] = column
927 route
["pref"] = column
935 Gets a structured return of the command 'ip rule'. It can be used in
936 conjunction with json_cmp() to provide accurate assert explanations.
952 "from": "1.2.0.0/16",
957 output
= normalize_text(node
.run("ip rule")).splitlines()
960 columns
= line
.split(" ")
963 # remove last character, since it is ':'
964 pref
= columns
[0][:-1]
967 for column
in columns
:
969 route
["from"] = column
973 route
["proto"] = column
975 route
["iif"] = column
977 route
["fwmark"] = column
984 def sleep(amount
, reason
=None):
986 Sleep wrapper that registers in the log the amount of sleep
989 logger
.info("Sleeping for {} seconds".format(amount
))
991 logger
.info(reason
+ " ({} seconds)".format(amount
))
996 def checkAddressSanitizerError(output
, router
, component
, logdir
=""):
997 "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
999 def processAddressSanitizerError(asanErrorRe
, output
, router
, component
):
1001 "%s: %s triggered an exception by AddressSanitizer\n" % (router
, component
)
1003 # Sanitizer Error found in log
1004 pidMark
= asanErrorRe
.group(1)
1005 addressSanitizerLog
= re
.search(
1006 "%s(.*)%s" % (pidMark
, pidMark
), output
, re
.DOTALL
1008 if addressSanitizerLog
:
1009 # Find Calling Test. Could be multiple steps back
1010 testframe
= sys
._current
_frames
().values()[0]
1013 test
= os
.path
.splitext(
1014 os
.path
.basename(testframe
.f_globals
["__file__"])
1016 if (test
!= "topotest") and (test
!= "topogen"):
1017 # Found the calling test
1018 callingTest
= os
.path
.basename(testframe
.f_globals
["__file__"])
1021 testframe
= testframe
.f_back
1023 # somehow couldn't find the test script.
1024 callingTest
= "unknownTest"
1026 # Now finding Calling Procedure
1029 callingProc
= sys
._getframe
(level
).f_code
.co_name
1031 (callingProc
!= "processAddressSanitizerError")
1032 and (callingProc
!= "checkAddressSanitizerError")
1033 and (callingProc
!= "checkRouterCores")
1034 and (callingProc
!= "stopRouter")
1035 and (callingProc
!= "stop")
1036 and (callingProc
!= "stop_topology")
1037 and (callingProc
!= "checkRouterRunning")
1038 and (callingProc
!= "check_router_running")
1039 and (callingProc
!= "routers_have_failure")
1041 # Found the calling test
1045 # something wrong - couldn't found the calling test function
1046 callingProc
= "unknownProc"
1047 with
open("/tmp/AddressSanitzer.txt", "a") as addrSanFile
:
1049 "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1050 % (callingTest
, callingProc
, router
)
1053 "\n".join(addressSanitizerLog
.group(1).splitlines()) + "\n"
1055 addrSanFile
.write("## Error: %s\n\n" % asanErrorRe
.group(2))
1057 "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1058 % (callingTest
, callingProc
, router
)
1062 + "\n ".join(addressSanitizerLog
.group(1).splitlines())
1065 addrSanFile
.write("\n---------------\n")
1068 addressSanitizerError
= re
.search(
1069 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
1071 if addressSanitizerError
:
1072 processAddressSanitizerError(addressSanitizerError
, output
, router
, component
)
1075 # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
1077 filepattern
= logdir
+ "/" + router
+ "/" + component
+ ".asan.*"
1079 "Log check for %s on %s, pattern %s\n" % (component
, router
, filepattern
)
1081 for file in glob
.glob(filepattern
):
1082 with
open(file, "r") as asanErrorFile
:
1083 asanError
= asanErrorFile
.read()
1084 addressSanitizerError
= re
.search(
1085 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
1087 if addressSanitizerError
:
1088 processAddressSanitizerError(
1089 addressSanitizerError
, asanError
, router
, component
1095 def _sysctl_atleast(commander
, variable
, min_value
):
1096 if isinstance(min_value
, tuple):
1097 min_value
= list(min_value
)
1098 is_list
= isinstance(min_value
, list)
1100 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1102 cur_val
= [int(x
) for x
in sval
.split()]
1108 for i
, v
in enumerate(cur_val
):
1109 if v
< min_value
[i
]:
1114 if cur_val
< min_value
:
1118 valstr
= " ".join([str(x
) for x
in min_value
])
1120 valstr
= str(min_value
)
1121 logger
.info("Increasing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1122 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1125 def _sysctl_assure(commander
, variable
, value
):
1126 if isinstance(value
, tuple):
1128 is_list
= isinstance(value
, list)
1130 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1132 cur_val
= [int(x
) for x
in sval
.split()]
1138 for i
, v
in enumerate(cur_val
):
1144 if cur_val
!= str(value
):
1149 valstr
= " ".join([str(x
) for x
in value
])
1152 logger
.info("Changing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1153 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1156 def sysctl_atleast(commander
, variable
, min_value
, raises
=False):
1158 if commander
is None:
1159 commander
= micronet
.Commander("topotest")
1160 return _sysctl_atleast(commander
, variable
, min_value
)
1161 except subprocess
.CalledProcessError
as error
:
1163 "%s: Failed to assure sysctl min value %s = %s",
1172 def sysctl_assure(commander
, variable
, value
, raises
=False):
1174 if commander
is None:
1175 commander
= micronet
.Commander("topotest")
1176 return _sysctl_assure(commander
, variable
, value
)
1177 except subprocess
.CalledProcessError
as error
:
1179 "%s: Failed to assure sysctl value %s = %s",
1189 def rlimit_atleast(rname
, min_value
, raises
=False):
1191 cval
= resource
.getrlimit(rname
)
1193 if soft
< min_value
:
1194 nval
= (min_value
, hard
if min_value
< hard
else min_value
)
1195 logger
.info("Increasing rlimit %s from %s to %s", rname
, cval
, nval
)
1196 resource
.setrlimit(rname
, nval
)
1197 except subprocess
.CalledProcessError
as error
:
1199 "Failed to assure rlimit [%s] = %s", rname
, min_value
, exc_info
=True
1205 def fix_netns_limits(ns
):
1207 # Maximum read and write socket buffer sizes
1208 sysctl_atleast(ns
, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
1209 sysctl_atleast(ns
, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
1211 sysctl_assure(ns
, "net.ipv4.conf.all.rp_filter", 0)
1212 sysctl_assure(ns
, "net.ipv4.conf.default.rp_filter", 0)
1213 sysctl_assure(ns
, "net.ipv4.conf.lo.rp_filter", 0)
1215 sysctl_assure(ns
, "net.ipv4.conf.all.forwarding", 1)
1216 sysctl_assure(ns
, "net.ipv4.conf.default.forwarding", 1)
1218 # XXX if things fail look here as this wasn't done previously
1219 sysctl_assure(ns
, "net.ipv6.conf.all.forwarding", 1)
1220 sysctl_assure(ns
, "net.ipv6.conf.default.forwarding", 1)
1223 sysctl_assure(ns
, "net.ipv4.conf.default.arp_announce", 2)
1224 sysctl_assure(ns
, "net.ipv4.conf.default.arp_notify", 1)
1225 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1226 sysctl_assure(ns
, "net.ipv4.conf.default.arp_ignore", 0)
1227 sysctl_assure(ns
, "net.ipv4.conf.all.arp_announce", 2)
1228 sysctl_assure(ns
, "net.ipv4.conf.all.arp_notify", 1)
1229 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1230 sysctl_assure(ns
, "net.ipv4.conf.all.arp_ignore", 0)
1232 sysctl_assure(ns
, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)
1234 # Keep ipv6 permanent addresses on an admin down
1235 sysctl_assure(ns
, "net.ipv6.conf.all.keep_addr_on_down", 1)
1236 if version_cmp(platform
.release(), "4.20") >= 0:
1237 sysctl_assure(ns
, "net.ipv6.route.skip_notify_on_dev_down", 1)
1239 sysctl_assure(ns
, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
1240 sysctl_assure(ns
, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)
1243 sysctl_atleast(ns
, "net.ipv4.igmp_max_memberships", 1000)
1245 # Use neigh information on selection of nexthop for multipath hops
1246 sysctl_assure(ns
, "net.ipv4.fib_multipath_use_neigh", 1)
1249 def fix_host_limits():
1250 """Increase system limits."""
1252 rlimit_atleast(resource
.RLIMIT_NPROC
, 8 * 1024)
1253 rlimit_atleast(resource
.RLIMIT_NOFILE
, 16 * 1024)
1254 sysctl_atleast(None, "fs.file-max", 16 * 1024)
1255 sysctl_atleast(None, "kernel.pty.max", 16 * 1024)
1258 # Original on ubuntu 17.x, but apport won't save as in namespace
1259 # |/usr/share/apport/apport %p %s %c %d %P
1260 sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
1261 sysctl_assure(None, "kernel.core_uses_pid", 1)
1262 sysctl_assure(None, "fs.suid_dumpable", 1)
1264 # Maximum connection backlog
1265 sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
1267 # Maximum read and write socket buffer sizes
1268 sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
1269 sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
1271 # Garbage Collection Settings for ARP and Neighbors
1272 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
1273 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
1274 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
1275 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
1276 # Hold entries for 10 minutes
1277 sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1278 sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1281 sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)
1284 sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)
1286 # Increase routing table size to 128K
1287 sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
1288 sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
1291 def setup_node_tmpdir(logdir
, name
):
1292 # Cleanup old log, valgrind, and core files.
1293 subprocess
.check_call(
1294 "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir
, name
), shell
=True
1297 # Setup the per node directory.
1298 nodelogdir
= "{}/{}".format(logdir
, name
)
1299 subprocess
.check_call(
1300 "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir
), shell
=True
1302 logfile
= "{0}/{1}.log".format(logdir
, name
)
1307 "A Node with IPv4/IPv6 forwarding enabled"
1309 def __init__(self
, name
, **params
):
1311 # Backward compatibility:
1312 # Load configuration defaults like topogen.
1313 self
.config_defaults
= configparser
.ConfigParser(
1315 "verbosity": "info",
1316 "frrdir": "/usr/lib/frr",
1317 "routertype": "frr",
1322 self
.config_defaults
.read(
1323 os
.path
.join(os
.path
.dirname(os
.path
.realpath(__file__
)), "../pytest.ini")
1326 # If this topology is using old API and doesn't have logdir
1327 # specified, then attempt to generate an unique logdir.
1328 self
.logdir
= params
.get("logdir")
1329 if self
.logdir
is None:
1330 self
.logdir
= get_logs_path(g_extra_config
["rundir"])
1332 if not params
.get("logger"):
1333 # If logger is present topogen has already set this up
1334 logfile
= setup_node_tmpdir(self
.logdir
, name
)
1335 l
= topolog
.get_logger(name
, log_level
="debug", target
=logfile
)
1336 params
["logger"] = l
1338 super(Router
, self
).__init
__(name
, **params
)
1340 self
.daemondir
= None
1341 self
.hasmpls
= False
1342 self
.routertype
= "frr"
1343 self
.unified_config
= None
1365 self
.daemons_options
= {"zebra": ""}
1366 self
.reportCores
= True
1369 self
.ns_cmd
= "sudo nsenter -a -t {} ".format(self
.pid
)
1371 # Allow escaping from running inside docker
1372 cgroup
= open("/proc/1/cgroup").read()
1373 m
= re
.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup
)
1375 self
.ns_cmd
= "docker exec -it {} ".format(m
.group(1)) + self
.ns_cmd
1379 logger
.debug("CMD to enter {}: {}".format(self
.name
, self
.ns_cmd
))
1381 def _config_frr(self
, **params
):
1382 "Configure FRR binaries"
1383 self
.daemondir
= params
.get("frrdir")
1384 if self
.daemondir
is None:
1385 self
.daemondir
= self
.config_defaults
.get("topogen", "frrdir")
1387 zebra_path
= os
.path
.join(self
.daemondir
, "zebra")
1388 if not os
.path
.isfile(zebra_path
):
1389 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path
))
1391 # pylint: disable=W0221
1392 # Some params are only meaningful for the parent class.
1393 def config(self
, **params
):
1394 super(Router
, self
).config(**params
)
1396 # User did not specify the daemons directory, try to autodetect it.
1397 self
.daemondir
= params
.get("daemondir")
1398 if self
.daemondir
is None:
1399 self
.routertype
= params
.get(
1400 "routertype", self
.config_defaults
.get("topogen", "routertype")
1402 self
._config
_frr
(**params
)
1404 # Test the provided path
1405 zpath
= os
.path
.join(self
.daemondir
, "zebra")
1406 if not os
.path
.isfile(zpath
):
1407 raise Exception("No zebra binary found in {}".format(zpath
))
1408 # Allow user to specify routertype when the path was specified.
1409 if params
.get("routertype") is not None:
1410 self
.routertype
= params
.get("routertype")
1412 # Set ownership of config files
1413 self
.cmd("chown {0}:{0}vty /etc/{0}".format(self
.routertype
))
1415 def terminate(self
):
1416 # Stop running FRR daemons
1418 super(Router
, self
).terminate()
1419 os
.system("chmod -R go+rw " + self
.logdir
)
# Return count of running daemons
def listDaemons(self):
    """Return a list of (daemon-name, pid) tuples for live daemons.

    Scans the router's pidfile directory; stale pidfiles (whose process
    no longer exists under /proc) are logged and removed rather than
    reported.
    """
    ret = []
    rc, stdout, _ = self.cmd_status(
        "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
    )
    if rc:
        # No pidfiles at all -> nothing running.
        return ret
    for d in stdout.strip().split("\n"):
        pidfile = d.strip()
        try:
            pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
            # Strip the trailing ".pid" to recover the daemon name.
            name = os.path.basename(pidfile[:-4])

            # probably not compatible with bsd.
            rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
            if rc:
                # Process is gone but the pidfile remains: clean it up.
                logger.warning(
                    "%s: %s exited leaving pidfile %s (%s)",
                    self.name,
                    name,
                    pidfile,
                    pid,
                )
                self.cmd("rm -- " + pidfile)
            else:
                ret.append((name, pid))
        except (subprocess.CalledProcessError, ValueError):
            # Unreadable or non-numeric pidfile content: skip the entry.
            pass
    return ret
1452 def stopRouter(self
, assertOnError
=True, minErrorVersion
="5.1"):
1453 # Stop Running FRR Daemons
1454 running
= self
.listDaemons()
1458 logger
.info("%s: stopping %s", self
.name
, ", ".join([x
[0] for x
in running
]))
1459 for name
, pid
in running
:
1460 logger
.info("{}: sending SIGTERM to {}".format(self
.name
, name
))
1462 os
.kill(pid
, signal
.SIGTERM
)
1463 except OSError as err
:
1465 "%s: could not kill %s (%s): %s", self
.name
, name
, pid
, str(err
)
1468 running
= self
.listDaemons()
1470 for _
in range(0, 30):
1473 "{}: waiting for daemons stopping: {}".format(
1474 self
.name
, ", ".join([x
[0] for x
in running
])
1477 running
= self
.listDaemons()
1485 "%s: sending SIGBUS to: %s", self
.name
, ", ".join([x
[0] for x
in running
])
1487 for name
, pid
in running
:
1488 pidfile
= "/var/run/{}/{}.pid".format(self
.routertype
, name
)
1489 logger
.info("%s: killing %s", self
.name
, name
)
1490 self
.cmd("kill -SIGBUS %d" % pid
)
1491 self
.cmd("rm -- " + pidfile
)
1494 0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self
.name
1497 errors
= self
.checkRouterCores(reportOnce
=True)
1498 if self
.checkRouterVersion("<", minErrorVersion
):
1499 # ignore errors in old versions
1501 if assertOnError
and (errors
is not None) and len(errors
) > 0:
1502 assert "Errors found - details follow:" == 0, errors
def removeIPs(self):
    """Flush every IP address from all of this router's interfaces.

    Errors are logged but deliberately not fatal (best-effort cleanup).
    """
    for interface in self.intfNames():
        try:
            self.intf_ip_cmd(interface, "ip address flush " + interface)
        except Exception as ex:
            # Best-effort: report and continue with the next interface.
            logger.error("%s can't remove IPs %s", self, str(ex))
            # assert False, "can't remove IPs %s" % str(ex)
def checkCapability(self, daemon, param):
    """Check whether `daemon` supports the command-line option `param`.

    A `None` param is trivially supported. Otherwise the daemon's help
    output is grepped for the option name (dashes stripped).

    Returns:
        bool: True if supported (or param is None), False otherwise.
    """
    if param is not None:
        daemon_path = os.path.join(self.daemondir, daemon)
        # Strip dashes so "-M"/"--module" style options match the help text.
        daemon_search_option = param.replace("-", "")
        output = self.cmd(
            "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
        )
        if daemon_search_option not in output:
            return False
    return True
def loadConf(self, daemon, source=None, param=None):
    """Enabled and set config for a daemon.

    Arranges for loading of daemon configuration from the specified source. Possible
    `source` values are `None` for an empty config file, a path name which is used
    directly, or a file name with no path components which is first looked for
    directly and then looked for under a sub-directory named after router.
    """

    # Unfortunately this API allows for source to not exist for any and all routers.
    if source:
        head, tail = os.path.split(source)
        if not head and not self.path_exists(tail):
            # Bare filename not found directly: look under <scriptdir>/<router>/.
            script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
            router_relative = os.path.join(script_dir, self.name, tail)
            if self.path_exists(router_relative):
                source = router_relative
                # NOTE(review): original call site for this log line was lost
                # in the paste; module logger used here — confirm against
                # upstream (may have been self.logger).
                logger.info(
                    "using router relative configuration: {}".format(source)
                )

    # print "Daemons before:", self.daemons
    if daemon in self.daemons.keys() or daemon == "frr":
        if daemon == "frr":
            # "frr" selects a single unified config file instead of
            # per-daemon config files.
            self.unified_config = 1
        else:
            self.daemons[daemon] = 1
        if param is not None:
            self.daemons_options[daemon] = param
        conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
        if source is None or not os.path.exists(source):
            # No usable source: start from an empty config file.
            if daemon == "frr" or not self.unified_config:
                self.cmd_raises("rm -f " + conf_file)
                self.cmd_raises("touch " + conf_file)
        else:
            self.cmd_raises("cp {} {}".format(source, conf_file))

        if not self.unified_config or daemon == "frr":
            self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
            self.cmd_raises("chmod 664 {}".format(conf_file))

        if (daemon == "snmpd") and (self.routertype == "frr"):
            # /etc/snmp is private mount now
            self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
            self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

        if (daemon == "zebra") and (self.daemons["staticd"] == 0):
            # Add staticd with zebra - if it exists
            staticd_path = os.path.join(self.daemondir, "staticd")

            if os.path.isfile(staticd_path):
                self.daemons["staticd"] = 1
                self.daemons_options["staticd"] = ""
                # Auto-Started staticd has no config, so it will read from zebra config
    else:
        logger.info("No daemon {} known".format(daemon))
    # print "Daemons after:", self.daemons
def runInWindow(self, cmd, title=None):
    """Run `cmd` in a separate terminal window (compat wrapper).

    Thin wrapper kept for backward compatibility; delegates to the
    micronet-provided run_in_window().
    """
    return self.run_in_window(cmd, title)
1589 def startRouter(self
, tgen
=None):
1590 if self
.unified_config
:
1592 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1596 # Disable integrated-vtysh-config
1598 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1603 "chown %s:%svty /etc/%s/vtysh.conf"
1604 % (self
.routertype
, self
.routertype
, self
.routertype
)
1606 # TODO remove the following lines after all tests are migrated to Topogen.
1607 # Try to find relevant old logfiles in /tmp and delete them
1608 map(os
.remove
, glob
.glob("{}/{}/*.log".format(self
.logdir
, self
.name
)))
1609 # Remove old core files
1610 map(os
.remove
, glob
.glob("{}/{}/*.dmp".format(self
.logdir
, self
.name
)))
1611 # Remove IP addresses from OS first - we have them in zebra.conf
1613 # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
1614 # No error - but return message and skip all the tests
1615 if self
.daemons
["ldpd"] == 1:
1616 ldpd_path
= os
.path
.join(self
.daemondir
, "ldpd")
1617 if not os
.path
.isfile(ldpd_path
):
1618 logger
.info("LDP Test, but no ldpd compiled or installed")
1619 return "LDP Test, but no ldpd compiled or installed"
1621 if version_cmp(platform
.release(), "4.5") < 0:
1622 logger
.info("LDP Test need Linux Kernel 4.5 minimum")
1623 return "LDP Test need Linux Kernel 4.5 minimum"
1624 # Check if have mpls
1626 self
.hasmpls
= tgen
.hasmpls
1627 if self
.hasmpls
!= True:
1629 "LDP/MPLS Tests will be skipped, platform missing module(s)"
1632 # Test for MPLS Kernel modules available
1633 self
.hasmpls
= False
1634 if not module_present("mpls-router"):
1636 "MPLS tests will not run (missing mpls-router kernel module)"
1638 elif not module_present("mpls-iptunnel"):
1640 "MPLS tests will not run (missing mpls-iptunnel kernel module)"
1644 if self
.hasmpls
!= True:
1645 return "LDP/MPLS Tests need mpls kernel modules"
1647 # Really want to use sysctl_atleast here, but only when MPLS is actually being
1649 self
.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
1651 shell_routers
= g_extra_config
["shell"]
1652 if "all" in shell_routers
or self
.name
in shell_routers
:
1653 self
.run_in_window(os
.getenv("SHELL", "bash"), title
="sh-%s" % self
.name
)
1655 if self
.daemons
["eigrpd"] == 1:
1656 eigrpd_path
= os
.path
.join(self
.daemondir
, "eigrpd")
1657 if not os
.path
.isfile(eigrpd_path
):
1658 logger
.info("EIGRP Test, but no eigrpd compiled or installed")
1659 return "EIGRP Test, but no eigrpd compiled or installed"
1661 if self
.daemons
["bfdd"] == 1:
1662 bfdd_path
= os
.path
.join(self
.daemondir
, "bfdd")
1663 if not os
.path
.isfile(bfdd_path
):
1664 logger
.info("BFD Test, but no bfdd compiled or installed")
1665 return "BFD Test, but no bfdd compiled or installed"
1667 status
= self
.startRouterDaemons(tgen
=tgen
)
1669 vtysh_routers
= g_extra_config
["vtysh"]
1670 if "all" in vtysh_routers
or self
.name
in vtysh_routers
:
1671 self
.run_in_window("vtysh", title
="vt-%s" % self
.name
)
1673 if self
.unified_config
:
1674 self
.cmd("vtysh -f /etc/frr/frr.conf")
def getStdErr(self, daemon):
    """Return the captured stderr output of `daemon` (see getLog)."""
    return self.getLog("err", daemon)
def getStdOut(self, daemon):
    """Return the captured stdout output of `daemon` (see getLog)."""
    return self.getLog("out", daemon)
def getLog(self, log, daemon):
    """Return contents of `<logdir>/<router>/<daemon>.<log>`.

    `log` is the file suffix, e.g. "out" or "err".
    """
    return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
1687 def startRouterDaemons(self
, daemons
=None, tgen
=None):
1688 "Starts FRR daemons for this router."
1690 asan_abort
= g_extra_config
["asan_abort"]
1691 gdb_breakpoints
= g_extra_config
["gdb_breakpoints"]
1692 gdb_daemons
= g_extra_config
["gdb_daemons"]
1693 gdb_routers
= g_extra_config
["gdb_routers"]
1694 valgrind_extra
= g_extra_config
["valgrind_extra"]
1695 valgrind_memleaks
= g_extra_config
["valgrind_memleaks"]
1696 strace_daemons
= g_extra_config
["strace_daemons"]
1698 # Get global bundle data
1699 if not self
.path_exists("/etc/frr/support_bundle_commands.conf"):
1700 # Copy global value if was covered by namespace mount
1702 if os
.path
.exists("/etc/frr/support_bundle_commands.conf"):
1703 with
open("/etc/frr/support_bundle_commands.conf", "r") as rf
:
1704 bundle_data
= rf
.read()
1706 "cat > /etc/frr/support_bundle_commands.conf",
1710 # Starts actual daemons without init (ie restart)
1711 # cd to per node directory
1712 self
.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self
.logdir
, self
.name
))
1713 self
.set_cwd("{}/{}".format(self
.logdir
, self
.name
))
1714 self
.cmd("umask 000")
1716 # Re-enable to allow for report per run
1717 self
.reportCores
= True
1719 # XXX: glue code forward ported from removed function.
1720 if self
.version
== None:
1721 self
.version
= self
.cmd(
1722 os
.path
.join(self
.daemondir
, "bgpd") + " -v"
1724 logger
.info("{}: running version: {}".format(self
.name
, self
.version
))
1725 # If `daemons` was specified then some upper API called us with
1726 # specific daemons, otherwise just use our own configuration.
1728 if daemons
is not None:
1729 daemons_list
= daemons
1731 # Append all daemons configured.
1732 for daemon
in self
.daemons
:
1733 if self
.daemons
[daemon
] == 1:
1734 daemons_list
.append(daemon
)
1736 def start_daemon(daemon
, extra_opts
=None):
1737 daemon_opts
= self
.daemons_options
.get(daemon
, "")
1738 rediropt
= " > {0}.out 2> {0}.err".format(daemon
)
1739 if daemon
== "snmpd":
1740 binary
= "/usr/sbin/snmpd"
1742 cmdopt
= "{} -C -c /etc/frr/snmpd.conf -p ".format(
1744 ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self
.routertype
)
1746 binary
= os
.path
.join(self
.daemondir
, daemon
)
1748 cmdenv
= "ASAN_OPTIONS="
1750 cmdenv
= "abort_on_error=1:"
1751 cmdenv
+= "log_path={0}/{1}.{2}.asan ".format(
1752 self
.logdir
, self
.name
, daemon
1755 if valgrind_memleaks
:
1756 this_dir
= os
.path
.dirname(
1757 os
.path
.abspath(os
.path
.realpath(__file__
))
1759 supp_file
= os
.path
.abspath(
1760 os
.path
.join(this_dir
, "../../../tools/valgrind.supp")
1762 cmdenv
+= " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
1763 daemon
, self
.logdir
, self
.name
, supp_file
1767 " --gen-suppressions=all --expensive-definedness-checks=yes"
1769 elif daemon
in strace_daemons
or "all" in strace_daemons
:
1770 cmdenv
= "strace -f -D -o {1}/{2}.strace.{0} ".format(
1771 daemon
, self
.logdir
, self
.name
1774 cmdopt
= "{} --command-log-always --log file:{}.log --log-level debug".format(
1778 cmdopt
+= " " + extra_opts
1781 (gdb_routers
or gdb_daemons
)
1783 not gdb_routers
or self
.name
in gdb_routers
or "all" in gdb_routers
1785 and (not gdb_daemons
or daemon
in gdb_daemons
or "all" in gdb_daemons
)
1787 if daemon
== "snmpd":
1791 gdbcmd
= "sudo -E gdb " + binary
1793 gdbcmd
+= " -ex 'set breakpoint pending on'"
1794 for bp
in gdb_breakpoints
:
1795 gdbcmd
+= " -ex 'b {}'".format(bp
)
1796 gdbcmd
+= " -ex 'run {}'".format(cmdopt
)
1798 self
.run_in_window(gdbcmd
, daemon
)
1801 "%s: %s %s launched in gdb window", self
, self
.routertype
, daemon
1804 if daemon
!= "snmpd":
1809 self
.cmd_raises(" ".join([cmdenv
, binary
, cmdopt
]), warn
=False)
1810 except subprocess
.CalledProcessError
as error
:
1812 '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
1817 '\n:stdout: "{}"'.format(error
.stdout
.strip())
1820 '\n:stderr: "{}"'.format(error
.stderr
.strip())
1825 logger
.info("%s: %s %s started", self
, self
.routertype
, daemon
)
1828 if "zebra" in daemons_list
:
1829 start_daemon("zebra", "-s 90000000")
1830 while "zebra" in daemons_list
:
1831 daemons_list
.remove("zebra")
1833 # Start staticd next if required
1834 if "staticd" in daemons_list
:
1835 start_daemon("staticd")
1836 while "staticd" in daemons_list
:
1837 daemons_list
.remove("staticd")
1839 if "snmpd" in daemons_list
:
1840 # Give zerbra a chance to configure interface addresses that snmpd daemon
1844 start_daemon("snmpd")
1845 while "snmpd" in daemons_list
:
1846 daemons_list
.remove("snmpd")
1849 # Fix Link-Local Addresses on initial startup
1850 # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
1851 _
, output
, _
= self
.cmd_status(
1852 "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
1853 stderr
=subprocess
.STDOUT
,
1855 logger
.debug("Set MACs:\n%s", output
)
1857 # Now start all the other daemons
1858 for daemon
in daemons_list
:
1859 if self
.daemons
[daemon
] == 0:
1861 start_daemon(daemon
)
1863 # Check if daemons are running.
1864 rundaemons
= self
.cmd("ls -1 /var/run/%s/*.pid" % self
.routertype
)
1865 if re
.search(r
"No such file or directory", rundaemons
):
1866 return "Daemons are not running"
1868 # Update the permissions on the log files
1869 self
.cmd("chown frr:frr -R {}/{}".format(self
.logdir
, self
.name
))
1870 self
.cmd("chmod ug+rwX,o+r -R {}/{}".format(self
.logdir
, self
.name
))
1874 def killRouterDaemons(
1875 self
, daemons
, wait
=True, assertOnError
=True, minErrorVersion
="5.1"
1878 # Daemons(user specified daemon only) using SIGKILL
1879 rundaemons
= self
.cmd("ls -1 /var/run/%s/*.pid" % self
.routertype
)
1881 daemonsNotRunning
= []
1882 if re
.search(r
"No such file or directory", rundaemons
):
1884 for daemon
in daemons
:
1885 if rundaemons
is not None and daemon
in rundaemons
:
1887 dmns
= rundaemons
.split("\n")
1888 # Exclude empty string at end of list
1890 if re
.search(r
"%s" % daemon
, d
):
1891 daemonpidfile
= d
.rstrip()
1892 daemonpid
= self
.cmd("cat %s" % daemonpidfile
).rstrip()
1893 if daemonpid
.isdigit() and pid_exists(int(daemonpid
)):
1895 "{}: killing {}".format(
1897 os
.path
.basename(daemonpidfile
.rsplit(".", 1)[0]),
1900 os
.kill(int(daemonpid
), signal
.SIGKILL
)
1901 if pid_exists(int(daemonpid
)):
1903 while wait
and numRunning
> 0:
1906 "{}: waiting for {} daemon to be stopped".format(
1911 # 2nd round of kill if daemons didn't exit
1913 if re
.search(r
"%s" % daemon
, d
):
1914 daemonpid
= self
.cmd("cat %s" % d
.rstrip()).rstrip()
1915 if daemonpid
.isdigit() and pid_exists(
1919 "{}: killing {}".format(
1922 d
.rstrip().rsplit(".", 1)[0]
1926 os
.kill(int(daemonpid
), signal
.SIGKILL
)
1927 if daemonpid
.isdigit() and not pid_exists(
1931 self
.cmd("rm -- {}".format(daemonpidfile
))
1933 errors
= self
.checkRouterCores(reportOnce
=True)
1934 if self
.checkRouterVersion("<", minErrorVersion
):
1935 # ignore errors in old versions
1937 if assertOnError
and len(errors
) > 0:
1938 assert "Errors found - details follow:" == 0, errors
1940 daemonsNotRunning
.append(daemon
)
1941 if len(daemonsNotRunning
) > 0:
1942 errors
= errors
+ "Daemons are not running", daemonsNotRunning
1946 def checkRouterCores(self
, reportLeaks
=True, reportOnce
=False):
1947 if reportOnce
and not self
.reportCores
:
1951 for daemon
in self
.daemons
:
1952 if self
.daemons
[daemon
] == 1:
1953 # Look for core file
1954 corefiles
= glob
.glob(
1955 "{}/{}/{}_core*.dmp".format(self
.logdir
, self
.name
, daemon
)
1957 if len(corefiles
) > 0:
1958 backtrace
= gdb_core(self
, daemon
, corefiles
)
1961 + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
1962 % (self
.name
, daemon
, backtrace
)
1966 log
= self
.getStdErr(daemon
)
1967 if "memstats" in log
:
1969 "%s: %s has memory leaks:\n" % (self
.name
, daemon
)
1971 traces
= traces
+ "\n%s: %s has memory leaks:\n" % (
1975 log
= re
.sub("core_handler: ", "", log
)
1977 r
"(showing active allocations in memory group [a-zA-Z0-9]+)",
1981 log
= re
.sub("memstats: ", " ", log
)
1982 sys
.stderr
.write(log
)
1984 # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
1985 if checkAddressSanitizerError(
1986 self
.getStdErr(daemon
), self
.name
, daemon
, self
.logdir
1989 "%s: Daemon %s killed by AddressSanitizer" % (self
.name
, daemon
)
1991 traces
= traces
+ "\n%s: Daemon %s killed by AddressSanitizer" % (
1997 self
.reportCores
= False
2000 def checkRouterRunning(self
):
2001 "Check if router daemons are running and collect crashinfo they don't run"
2005 daemonsRunning
= self
.cmd(
2006 'vtysh -c "show logging" | grep "Logging configuration for"'
2008 # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
2009 if checkAddressSanitizerError(daemonsRunning
, self
.name
, "vtysh"):
2010 return "%s: vtysh killed by AddressSanitizer" % (self
.name
)
2012 for daemon
in self
.daemons
:
2013 if daemon
== "snmpd":
2015 if (self
.daemons
[daemon
] == 1) and not (daemon
in daemonsRunning
):
2016 sys
.stderr
.write("%s: Daemon %s not running\n" % (self
.name
, daemon
))
2017 if daemon
== "staticd":
2019 "You may have a copy of staticd installed but are attempting to test against\n"
2022 "a version of FRR that does not have staticd, please cleanup the install dir\n"
2025 # Look for core file
2026 corefiles
= glob
.glob(
2027 "{}/{}/{}_core*.dmp".format(self
.logdir
, self
.name
, daemon
)
2029 if len(corefiles
) > 0:
2030 gdb_core(self
, daemon
, corefiles
)
2032 # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
2034 "{}/{}/{}.log".format(self
.logdir
, self
.name
, daemon
)
2036 log_tail
= subprocess
.check_output(
2038 "tail -n20 {}/{}/{}.log 2> /dev/null".format(
2039 self
.logdir
, self
.name
, daemon
2045 "\nFrom %s %s %s log file:\n"
2046 % (self
.routertype
, self
.name
, daemon
)
2048 sys
.stderr
.write("%s\n" % log_tail
)
2050 # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
2051 if checkAddressSanitizerError(
2052 self
.getStdErr(daemon
), self
.name
, daemon
, self
.logdir
2054 return "%s: Daemon %s not running - killed by AddressSanitizer" % (
2059 return "%s: Daemon %s not running" % (self
.name
, daemon
)
2062 def checkRouterVersion(self
, cmpop
, version
):
2064 Compares router version using operation `cmpop` with `version`.
2065 Valid `cmpop` values:
2066 * `>=`: has the same version or greater
2067 * '>': has greater version
2068 * '=': has the same version
2069 * '<': has a lesser version
2070 * '<=': has the same version or lesser
2072 Usage example: router.checkRouterVersion('>', '1.0')
2075 # Make sure we have version information first
2076 if self
.version
== None:
2077 self
.version
= self
.cmd(
2078 os
.path
.join(self
.daemondir
, "bgpd") + " -v"
2080 logger
.info("{}: running version: {}".format(self
.name
, self
.version
))
2082 rversion
= self
.version
2083 if rversion
== None:
2086 result
= version_cmp(rversion
, version
)
2100 def get_ipv6_linklocal(self
):
2101 "Get LinkLocal Addresses from interfaces"
2105 ifaces
= self
.cmd("ip -6 address")
2106 # Fix newlines (make them all the same)
2107 ifaces
= ("\n".join(ifaces
.splitlines()) + "\n").splitlines()
2111 m
= re
.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line
)
2113 interface
= m
.group(1)
2116 "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
2121 ll_per_if_count
+= 1
2122 if ll_per_if_count
> 1:
2123 linklocal
+= [["%s-%s" % (interface
, ll_per_if_count
), local
]]
2125 linklocal
+= [[interface
, local
]]
def daemon_available(self, daemon):
    "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

    daemon_path = os.path.join(self.daemondir, daemon)
    if not os.path.isfile(daemon_path):
        return False
    if daemon == "ldpd":
        # LDP additionally requires kernel MPLS support: 4.5+ kernel and
        # both MPLS modules loadable.
        if version_cmp(platform.release(), "4.5") < 0:
            return False
        if not module_present("mpls-router", load=False):
            return False
        if not module_present("mpls-iptunnel", load=False):
            return False

    return True
def get_routertype(self):
    "Return the type of Router (frr)"

    return self.routertype
2148 def report_memory_leaks(self
, filename_prefix
, testscript
):
2149 "Report Memory Leaks to file prefixed with given string"
2152 filename
= filename_prefix
+ re
.sub(r
"\.py", "", testscript
) + ".txt"
2153 for daemon
in self
.daemons
:
2154 if self
.daemons
[daemon
] == 1:
2155 log
= self
.getStdErr(daemon
)
2156 if "memstats" in log
:
2159 "\nRouter {} {} StdErr Log:\n{}".format(self
.name
, daemon
, log
)
2163 # Check if file already exists
2164 fileexists
= os
.path
.isfile(filename
)
2165 leakfile
= open(filename
, "a")
2167 # New file - add header
2169 "# Memory Leak Detection for topotest %s\n\n"
2172 leakfile
.write("## Router %s\n" % self
.name
)
2173 leakfile
.write("### Process %s\n" % daemon
)
2174 log
= re
.sub("core_handler: ", "", log
)
2176 r
"(showing active allocations in memory group [a-zA-Z0-9]+)",
2180 log
= re
.sub("memstats: ", " ", log
)
2182 leakfile
.write("\n")
2188 """Convert string to unicode, depending on python version"""
2189 if sys
.version_info
[0] > 2:
2192 return unicode(s
) # pylint: disable=E0602
2196 return isinstance(o
, Mapping
)