5 # Library of helper functions for NetDEF Topology Tests
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
40 from copy
import deepcopy
42 import lib
.topolog
as topolog
43 from lib
.topolog
import logger
45 if sys
.version_info
[0] > 2:
47 from collections
.abc
import Mapping
49 import ConfigParser
as configparser
50 from collections
import Mapping
52 from lib
import micronet
53 from lib
.micronet_compat
import Node
def get_logs_path(rundir):
    """Return the path for this test run's logs beneath `rundir`.

    The per-test log subdirectory name is provided by topolog.
    """
    logdir_name = topolog.get_test_logdir()
    return os.path.join(rundir, logdir_name)
63 def gdb_core(obj
, daemon
, corefiles
):
79 gdbcmds
= [["-ex", i
.strip()] for i
in gdbcmds
.strip().split("\n")]
80 gdbcmds
= [item
for sl
in gdbcmds
for item
in sl
]
82 daemon_path
= os
.path
.join(obj
.daemondir
, daemon
)
83 backtrace
= subprocess
.check_output(
84 ["gdb", daemon_path
, corefiles
[0], "--batch"] + gdbcmds
87 "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj
.name
, daemon
)
89 sys
.stderr
.write("%s" % backtrace
)
93 class json_cmp_result(object):
94 "json_cmp result class for better assertion messages"
def add_error(self, error):
    "Append error message to the result"
    # Store the message line-by-line so reports can be joined cleanly.
    self.errors.extend(error.splitlines())
def has_errors(self):
    "Returns True if there were errors, otherwise False."
    # A non-empty error list is truthy; normalize to a bool for callers.
    return bool(self.errors)
def gen_report(self):
    "Return the collected errors as report lines, preceded by a headline."
    return ["Generated JSON diff error report:", ""] + self.errors
114 "Generated JSON diff error report:\n\n\n" + "\n".join(self
.errors
) + "\n\n"
118 def gen_json_diff_report(d1
, d2
, exact
=False, path
="> $", acc
=(0, "")):
120 Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.
124 if isinstance(v
, (dict, list)):
125 return "\t" + "\t".join(
126 json
.dumps(v
, indent
=4, separators
=(",", ": ")).splitlines(True)
129 return "'{}'".format(v
)
132 if isinstance(v
, (list, tuple)):
134 elif isinstance(v
, dict):
136 elif isinstance(v
, (int, float)):
138 elif isinstance(v
, bool):
140 elif isinstance(v
, str):
145 def get_errors(other_acc
):
148 def get_errors_n(other_acc
):
151 def add_error(acc
, msg
, points
=1):
152 return (acc
[0] + points
, acc
[1] + "{}: {}\n".format(path
, msg
))
154 def merge_errors(acc
, other_acc
):
155 return (acc
[0] + other_acc
[0], acc
[1] + other_acc
[1])
158 return "{}[{}]".format(path
, idx
)
161 return "{}->{}".format(path
, key
)
163 def has_errors(other_acc
):
164 return other_acc
[0] > 0
167 not isinstance(d1
, (list, dict))
168 and not isinstance(d2
, (list, dict))
173 not isinstance(d1
, (list, dict))
174 and not isinstance(d2
, (list, dict))
179 "d1 has element with value '{}' but in d2 it has value '{}'".format(d1
, d2
),
183 and isinstance(d2
, list)
184 and ((len(d2
) > 0 and d2
[0] == "__ordered__") or exact
)
188 if len(d1
) != len(d2
):
191 "d1 has Array of length {} but in d2 it is of length {}".format(
196 for idx
, v1
, v2
in zip(range(0, len(d1
)), d1
, d2
):
198 acc
, gen_json_diff_report(v1
, v2
, exact
=exact
, path
=add_idx(idx
))
200 elif isinstance(d1
, list) and isinstance(d2
, list):
201 if len(d1
) < len(d2
):
204 "d1 has Array of length {} but in d2 it is of length {}".format(
209 for idx2
, v2
in zip(range(0, len(d2
)), d2
):
213 for idx1
, v1
in zip(range(0, len(d1
)), d1
):
214 tmp_v1
= deepcopy(v1
)
215 tmp_v2
= deepcopy(v2
)
216 tmp_diff
= gen_json_diff_report(tmp_v1
, tmp_v2
, path
=add_idx(idx1
))
217 if not has_errors(tmp_diff
):
221 elif not closest_diff
or get_errors_n(tmp_diff
) < get_errors_n(
224 closest_diff
= tmp_diff
226 if not found_match
and isinstance(v2
, (list, dict)):
227 sub_error
= "\n\n\t{}".format(
228 "\t".join(get_errors(closest_diff
).splitlines(True))
233 "d2 has the following element at index {} which is not present in d1: "
234 + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
235 ).format(idx2
, dump_json(v2
), closest_idx
, sub_error
),
237 if not found_match
and not isinstance(v2
, (list, dict)):
240 "d2 has the following element at index {} which is not present in d1: {}".format(
244 elif isinstance(d1
, dict) and isinstance(d2
, dict) and exact
:
245 invalid_keys_d1
= [k
for k
in d1
.keys() if k
not in d2
.keys()]
246 invalid_keys_d2
= [k
for k
in d2
.keys() if k
not in d1
.keys()]
247 for k
in invalid_keys_d1
:
248 acc
= add_error(acc
, "d1 has key '{}' which is not present in d2".format(k
))
249 for k
in invalid_keys_d2
:
250 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
251 valid_keys_intersection
= [k
for k
in d1
.keys() if k
in d2
.keys()]
252 for k
in valid_keys_intersection
:
254 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
256 elif isinstance(d1
, dict) and isinstance(d2
, dict):
257 none_keys
= [k
for k
, v
in d2
.items() if v
== None]
258 none_keys_present
= [k
for k
in d1
.keys() if k
in none_keys
]
259 for k
in none_keys_present
:
261 acc
, "d1 has key '{}' which is not supposed to be present".format(k
)
263 keys
= [k
for k
, v
in d2
.items() if v
!= None]
264 invalid_keys_intersection
= [k
for k
in keys
if k
not in d1
.keys()]
265 for k
in invalid_keys_intersection
:
266 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
267 valid_keys_intersection
= [k
for k
in keys
if k
in d1
.keys()]
268 for k
in valid_keys_intersection
:
270 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
275 "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
276 json_type(d1
), json_type(d2
)
284 def json_cmp(d1
, d2
, exact
=False):
286 JSON compare function. Receives two parameters:
287 * `d1`: parsed JSON data structure
288 * `d2`: parsed JSON data structure
290 Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
291 in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
292 error report is generated and wrapped in a 'json_cmp_result()'. There are special
293 parameters and notations explained below which can be used to cover rather unusual
296 * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
297 order within JSON Arrays)
298 * using 'null' (or 'None' in Python) as JSON Object value is checking for key
300 * using '*' as JSON Object value or Array value is checking for presence in d1
301 without checking the values
302 * using '__ordered__' as first element in a JSON Array in d2 will also check the
303 order when it is compared to an Array in d1
306 (errors_n
, errors
) = gen_json_diff_report(deepcopy(d1
), deepcopy(d2
), exact
=exact
)
309 result
= json_cmp_result()
310 result
.add_error(errors
)
316 def router_output_cmp(router
, cmd
, expected
):
318 Runs `cmd` in router and compares the output with `expected`.
321 normalize_text(router
.vtysh_cmd(cmd
)),
322 normalize_text(expected
),
323 title1
="Current output",
324 title2
="Expected output",
328 def router_json_cmp(router
, cmd
, data
, exact
=False):
330 Runs `cmd` that returns JSON data (normally the command ends with 'json')
331 and compare with `data` contents.
333 return json_cmp(router
.vtysh_cmd(cmd
, isjson
=True), data
, exact
)
336 def run_and_expect(func
, what
, count
=20, wait
=3):
338 Run `func` and compare the result with `what`. Do it for `count` times
339 waiting `wait` seconds between tries. By default it tries 20 times with
340 3 seconds delay between tries.
342 Returns (True, func-return) on success or
343 (False, func-return) on failure.
347 Helper functions to use with this function:
351 start_time
= time
.time()
352 func_name
= "<unknown>"
353 if func
.__class
__ == functools
.partial
:
354 func_name
= func
.func
.__name
__
356 func_name
= func
.__name
__
359 "'{}' polling started (interval {} secs, maximum {} tries)".format(
360 func_name
, wait
, count
371 end_time
= time
.time()
373 "'{}' succeeded after {:.2f} seconds".format(
374 func_name
, end_time
- start_time
377 return (True, result
)
379 end_time
= time
.time()
381 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
383 return (False, result
)
386 def run_and_expect_type(func
, etype
, count
=20, wait
=3, avalue
=None):
388 Run `func` and compare the result with `etype`. Do it for `count` times
389 waiting `wait` seconds between tries. By default it tries 20 times with
390 3 seconds delay between tries.
392 This function is used when you want to test the return type and,
393 optionally, the return value.
395 Returns (True, func-return) on success or
396 (False, func-return) on failure.
398 start_time
= time
.time()
399 func_name
= "<unknown>"
400 if func
.__class
__ == functools
.partial
:
401 func_name
= func
.func
.__name
__
403 func_name
= func
.__name
__
406 "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
407 func_name
, wait
, int(wait
* count
)
413 if not isinstance(result
, etype
):
415 "Expected result type '{}' got '{}' instead".format(etype
, type(result
))
421 if etype
!= type(None) and avalue
!= None and result
!= avalue
:
422 logger
.debug("Expected value '{}' got '{}' instead".format(avalue
, result
))
427 end_time
= time
.time()
429 "'{}' succeeded after {:.2f} seconds".format(
430 func_name
, end_time
- start_time
433 return (True, result
)
435 end_time
= time
.time()
437 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
439 return (False, result
)
442 def router_json_cmp_retry(router
, cmd
, data
, exact
=False, retry_timeout
=10.0):
444 Runs `cmd` that returns JSON data (normally the command ends with 'json')
445 and compare with `data` contents. Retry by default for 10 seconds
449 return router_json_cmp(router
, cmd
, data
, exact
)
451 ok
, _
= run_and_expect(test_func
, None, int(retry_timeout
), 1)
456 "Converting Integer to DPID"
460 dpid
= "0" * (16 - len(dpid
)) + dpid
464 "Unable to derive default datapath ID - "
465 "please either specify a dpid or use a "
466 "canonical switch name such as s23."
471 "Check whether pid exists in the current process table."
476 os
.waitpid(pid
, os
.WNOHANG
)
481 except OSError as err
:
482 if err
.errno
== errno
.ESRCH
:
483 # ESRCH == No such process
485 elif err
.errno
== errno
.EPERM
:
486 # EPERM clearly means there's a process to deny access to
489 # According to "man 2 kill" possible error values are
490 # (EINVAL, EPERM, ESRCH)
496 def get_textdiff(text1
, text2
, title1
="", title2
="", **opts
):
497 "Returns empty string if same or formatted diff"
500 difflib
.unified_diff(text1
, text2
, fromfile
=title1
, tofile
=title2
, **opts
)
502 # Clean up line endings
503 diff
= os
.linesep
.join([s
for s
in diff
.splitlines() if s
])
def difflines(text1, text2, title1="", title2="", **opts):
    """Wrapper for get_textdiff to avoid string transformations.

    Normalizes each input (strips trailing whitespace, guarantees a final
    newline) and splits it into lines with line endings preserved, which is
    the form the difflib-based get_textdiff expects.

    Parameters:
    * `text1`: "current" text
    * `text2`: "expected" text
    * `title1`/`title2`: labels forwarded to the diff header
    * `opts`: extra keyword arguments passed through to get_textdiff
    """

    def _to_lines(text):
        # Keep line endings on each line (keepends=True) for difflib; the
        # original passed the magic positional `1` here.
        return ("\n".join(text.rstrip().splitlines()) + "\n").splitlines(keepends=True)

    return get_textdiff(_to_lines(text1), _to_lines(text2), title1, title2, **opts)
514 def get_file(content
):
516 Generates a temporary file in '/tmp' with `content` and returns the file name.
518 if isinstance(content
, list) or isinstance(content
, tuple):
519 content
= "\n".join(content
)
520 fde
= tempfile
.NamedTemporaryFile(mode
="w", delete
=False)
527 def normalize_text(text
):
529 Strips formating spaces/tabs, carriage returns and trailing whitespace.
531 text
= re
.sub(r
"[ \t]+", " ", text
)
532 text
= re
.sub(r
"\r", "", text
)
534 # Remove whitespace in the middle of text.
535 text
= re
.sub(r
"[ \t]+\n", "\n", text
)
536 # Remove whitespace at the end of the text.
544 Parses unix name output to check if running on GNU/Linux.
546 Returns True if running on Linux, returns False otherwise.
549 if os
.uname()[0] == "Linux":
554 def iproute2_is_vrf_capable():
556 Checks if the iproute2 version installed on the system is capable of
557 handling VRFs by interpreting the output of the 'ip' utility found in PATH.
559 Returns True if capability can be detected, returns False otherwise.
564 subp
= subprocess
.Popen(
565 ["ip", "route", "show", "vrf"],
566 stdout
=subprocess
.PIPE
,
567 stderr
=subprocess
.PIPE
,
568 stdin
=subprocess
.PIPE
,
570 iproute2_err
= subp
.communicate()[1].splitlines()[0].split()[0]
572 if iproute2_err
!= "Error:":
579 def module_present_linux(module
, load
):
581 Returns whether `module` is present.
583 If `load` is true, it will try to load it via modprobe.
585 with
open("/proc/modules", "r") as modules_file
:
586 if module
.replace("-", "_") in modules_file
.read():
588 cmd
= "/sbin/modprobe {}{}".format("" if load
else "-n ", module
)
589 if os
.system(cmd
) != 0:
595 def module_present_freebsd(module
, load
):
def module_present(module, load=True):
    """Dispatch a kernel-module presence check to the platform helper.

    Returns the helper's result on Linux and FreeBSD; on any other
    platform the function falls through and returns None.
    """
    platform_id = sys.platform
    if platform_id.startswith("linux"):
        return module_present_linux(module, load)
    if platform_id.startswith("freebsd"):
        return module_present_freebsd(module, load)
606 def version_cmp(v1
, v2
):
608 Compare two version strings and returns:
610 * `-1`: if `v1` is less than `v2`
611 * `0`: if `v1` is equal to `v2`
612 * `1`: if `v1` is greater than `v2`
614 Raises `ValueError` if versions are not well formated.
616 vregex
= r
"(?P<whole>\d+(\.(\d+))*)"
617 v1m
= re
.match(vregex
, v1
)
618 v2m
= re
.match(vregex
, v2
)
619 if v1m
is None or v2m
is None:
620 raise ValueError("got a invalid version string")
623 v1g
= v1m
.group("whole").split(".")
624 v2g
= v2m
.group("whole").split(".")
626 # Get the longest version string
631 # Reverse list because we are going to pop the tail
634 for _
in range(vnum
):
662 def interface_set_status(node
, ifacename
, ifaceaction
=False, vrf_name
=None):
664 str_ifaceaction
= "no shutdown"
666 str_ifaceaction
= "shutdown"
668 cmd
= 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
669 ifacename
, str_ifaceaction
673 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
674 ifacename
, vrf_name
, str_ifaceaction
680 def ip4_route_zebra(node
, vrf_name
=None):
682 Gets an output of 'show ip route' command. It can be used
683 with comparing the output to a reference
686 tmp
= node
.vtysh_cmd("show ip route")
688 tmp
= node
.vtysh_cmd("show ip route vrf {0}".format(vrf_name
))
689 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
691 lines
= output
.splitlines()
693 while lines
and (not lines
[0].strip() or not header_found
):
694 if "o - offload failure" in lines
[0]:
697 return "\n".join(lines
)
700 def ip6_route_zebra(node
, vrf_name
=None):
702 Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
703 canonicalizes it by eliding link-locals.
707 tmp
= node
.vtysh_cmd("show ipv6 route")
709 tmp
= node
.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name
))
712 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
714 # Mask out the link-local addresses
715 output
= re
.sub(r
"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output
)
717 lines
= output
.splitlines()
719 while lines
and (not lines
[0].strip() or not header_found
):
720 if "o - offload failure" in lines
[0]:
724 return "\n".join(lines
)
727 def proto_name_to_number(protocol
):
742 ) # default return same as input
747 Gets a structured return of the command 'ip route'. It can be used in
748 conjuction with json_cmp() to provide accurate assert explanations.
763 output
= normalize_text(node
.run("ip route")).splitlines()
766 columns
= line
.split(" ")
767 route
= result
[columns
[0]] = {}
769 for column
in columns
:
771 route
["dev"] = column
773 route
["via"] = column
775 # translate protocol names back to numbers
776 route
["proto"] = proto_name_to_number(column
)
778 route
["metric"] = column
780 route
["scope"] = column
786 def ip4_vrf_route(node
):
788 Gets a structured return of the command 'ip route show vrf {0}-cust1'.
789 It can be used in conjuction with json_cmp() to provide accurate assert explanations.
804 output
= normalize_text(
805 node
.run("ip route show vrf {0}-cust1".format(node
.name
))
810 columns
= line
.split(" ")
811 route
= result
[columns
[0]] = {}
813 for column
in columns
:
815 route
["dev"] = column
817 route
["via"] = column
819 # translate protocol names back to numbers
820 route
["proto"] = proto_name_to_number(column
)
822 route
["metric"] = column
824 route
["scope"] = column
832 Gets a structured return of the command 'ip -6 route'. It can be used in
833 conjuction with json_cmp() to provide accurate assert explanations.
847 output
= normalize_text(node
.run("ip -6 route")).splitlines()
850 columns
= line
.split(" ")
851 route
= result
[columns
[0]] = {}
853 for column
in columns
:
855 route
["dev"] = column
857 route
["via"] = column
859 # translate protocol names back to numbers
860 route
["proto"] = proto_name_to_number(column
)
862 route
["metric"] = column
864 route
["pref"] = column
870 def ip6_vrf_route(node
):
872 Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
873 It can be used in conjuction with json_cmp() to provide accurate assert explanations.
887 output
= normalize_text(
888 node
.run("ip -6 route show vrf {0}-cust1".format(node
.name
))
892 columns
= line
.split(" ")
893 route
= result
[columns
[0]] = {}
895 for column
in columns
:
897 route
["dev"] = column
899 route
["via"] = column
901 # translate protocol names back to numbers
902 route
["proto"] = proto_name_to_number(column
)
904 route
["metric"] = column
906 route
["pref"] = column
914 Gets a structured return of the command 'ip rule'. It can be used in
915 conjuction with json_cmp() to provide accurate assert explanations.
931 "from": "1.2.0.0/16",
936 output
= normalize_text(node
.run("ip rule")).splitlines()
939 columns
= line
.split(" ")
942 # remove last character, since it is ':'
943 pref
= columns
[0][:-1]
946 for column
in columns
:
948 route
["from"] = column
952 route
["proto"] = column
954 route
["iif"] = column
956 route
["fwmark"] = column
963 def sleep(amount
, reason
=None):
965 Sleep wrapper that registers in the log the amount of sleep
968 logger
.info("Sleeping for {} seconds".format(amount
))
970 logger
.info(reason
+ " ({} seconds)".format(amount
))
975 def checkAddressSanitizerError(output
, router
, component
, logdir
=""):
976 "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
978 def processAddressSanitizerError(asanErrorRe
, output
, router
, component
):
980 "%s: %s triggered an exception by AddressSanitizer\n" % (router
, component
)
982 # Sanitizer Error found in log
983 pidMark
= asanErrorRe
.group(1)
984 addressSanitizerLog
= re
.search(
985 "%s(.*)%s" % (pidMark
, pidMark
), output
, re
.DOTALL
987 if addressSanitizerLog
:
988 # Find Calling Test. Could be multiple steps back
989 testframe
= sys
._current
_frames
().values()[0]
992 test
= os
.path
.splitext(
993 os
.path
.basename(testframe
.f_globals
["__file__"])
995 if (test
!= "topotest") and (test
!= "topogen"):
996 # Found the calling test
997 callingTest
= os
.path
.basename(testframe
.f_globals
["__file__"])
1000 testframe
= testframe
.f_back
1002 # somehow couldn't find the test script.
1003 callingTest
= "unknownTest"
1005 # Now finding Calling Procedure
1008 callingProc
= sys
._getframe
(level
).f_code
.co_name
1010 (callingProc
!= "processAddressSanitizerError")
1011 and (callingProc
!= "checkAddressSanitizerError")
1012 and (callingProc
!= "checkRouterCores")
1013 and (callingProc
!= "stopRouter")
1014 and (callingProc
!= "stop")
1015 and (callingProc
!= "stop_topology")
1016 and (callingProc
!= "checkRouterRunning")
1017 and (callingProc
!= "check_router_running")
1018 and (callingProc
!= "routers_have_failure")
1020 # Found the calling test
1024 # something wrong - couldn't found the calling test function
1025 callingProc
= "unknownProc"
1026 with
open("/tmp/AddressSanitzer.txt", "a") as addrSanFile
:
1028 "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1029 % (callingTest
, callingProc
, router
)
1032 "\n".join(addressSanitizerLog
.group(1).splitlines()) + "\n"
1034 addrSanFile
.write("## Error: %s\n\n" % asanErrorRe
.group(2))
1036 "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1037 % (callingTest
, callingProc
, router
)
1041 + "\n ".join(addressSanitizerLog
.group(1).splitlines())
1044 addrSanFile
.write("\n---------------\n")
1047 addressSanitizerError
= re
.search(
1048 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
1050 if addressSanitizerError
:
1051 processAddressSanitizerError(addressSanitizerError
, output
, router
, component
)
1054 # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
1056 filepattern
= logdir
+ "/" + router
+ "/" + component
+ ".asan.*"
1058 "Log check for %s on %s, pattern %s\n" % (component
, router
, filepattern
)
1060 for file in glob
.glob(filepattern
):
1061 with
open(file, "r") as asanErrorFile
:
1062 asanError
= asanErrorFile
.read()
1063 addressSanitizerError
= re
.search(
1064 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
1066 if addressSanitizerError
:
1067 processAddressSanitizerError(
1068 addressSanitizerError
, asanError
, router
, component
1074 def _sysctl_atleast(commander
, variable
, min_value
):
1075 if isinstance(min_value
, tuple):
1076 min_value
= list(min_value
)
1077 is_list
= isinstance(min_value
, list)
1079 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1081 cur_val
= [int(x
) for x
in sval
.split()]
1087 for i
, v
in enumerate(cur_val
):
1088 if v
< min_value
[i
]:
1093 if cur_val
< min_value
:
1097 valstr
= " ".join([str(x
) for x
in min_value
])
1099 valstr
= str(min_value
)
1100 logger
.info("Increasing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1101 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1104 def _sysctl_assure(commander
, variable
, value
):
1105 if isinstance(value
, tuple):
1107 is_list
= isinstance(value
, list)
1109 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1111 cur_val
= [int(x
) for x
in sval
.split()]
1117 for i
, v
in enumerate(cur_val
):
1123 if cur_val
!= str(value
):
1128 valstr
= " ".join([str(x
) for x
in value
])
1131 logger
.info("Changing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1132 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1135 def sysctl_atleast(commander
, variable
, min_value
, raises
=False):
1137 if commander
is None:
1138 commander
= micronet
.Commander("topotest")
1139 return _sysctl_atleast(commander
, variable
, min_value
)
1140 except subprocess
.CalledProcessError
as error
:
1142 "%s: Failed to assure sysctl min value %s = %s",
1151 def sysctl_assure(commander
, variable
, value
, raises
=False):
1153 if commander
is None:
1154 commander
= micronet
.Commander("topotest")
1155 return _sysctl_assure(commander
, variable
, value
)
1156 except subprocess
.CalledProcessError
as error
:
1158 "%s: Failed to assure sysctl value %s = %s",
1168 def rlimit_atleast(rname
, min_value
, raises
=False):
1170 cval
= resource
.getrlimit(rname
)
1172 if soft
< min_value
:
1173 nval
= (min_value
, hard
if min_value
< hard
else min_value
)
1174 logger
.info("Increasing rlimit %s from %s to %s", rname
, cval
, nval
)
1175 resource
.setrlimit(rname
, nval
)
1176 except subprocess
.CalledProcessError
as error
:
1178 "Failed to assure rlimit [%s] = %s", rname
, min_value
, exc_info
=True
1184 def fix_netns_limits(ns
):
1186 # Maximum read and write socket buffer sizes
1187 sysctl_atleast(ns
, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
1188 sysctl_atleast(ns
, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
1190 sysctl_assure(ns
, "net.ipv4.conf.all.rp_filter", 0)
1191 sysctl_assure(ns
, "net.ipv4.conf.default.rp_filter", 0)
1192 sysctl_assure(ns
, "net.ipv4.conf.lo.rp_filter", 0)
1194 sysctl_assure(ns
, "net.ipv4.conf.all.forwarding", 1)
1195 sysctl_assure(ns
, "net.ipv4.conf.default.forwarding", 1)
1197 # XXX if things fail look here as this wasn't done previously
1198 sysctl_assure(ns
, "net.ipv6.conf.all.forwarding", 1)
1199 sysctl_assure(ns
, "net.ipv6.conf.default.forwarding", 1)
1202 sysctl_assure(ns
, "net.ipv4.conf.default.arp_announce", 2)
1203 sysctl_assure(ns
, "net.ipv4.conf.default.arp_notify", 1)
1204 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1205 sysctl_assure(ns
, "net.ipv4.conf.default.arp_ignore", 0)
1206 sysctl_assure(ns
, "net.ipv4.conf.all.arp_announce", 2)
1207 sysctl_assure(ns
, "net.ipv4.conf.all.arp_notify", 1)
1208 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1209 sysctl_assure(ns
, "net.ipv4.conf.all.arp_ignore", 0)
1211 sysctl_assure(ns
, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)
1213 # Keep ipv6 permanent addresses on an admin down
1214 sysctl_assure(ns
, "net.ipv6.conf.all.keep_addr_on_down", 1)
1215 if version_cmp(platform
.release(), "4.20") >= 0:
1216 sysctl_assure(ns
, "net.ipv6.route.skip_notify_on_dev_down", 1)
1218 sysctl_assure(ns
, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
1219 sysctl_assure(ns
, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)
1222 sysctl_atleast(ns
, "net.ipv4.igmp_max_memberships", 1000)
1224 # Use neigh information on selection of nexthop for multipath hops
1225 sysctl_assure(ns
, "net.ipv4.fib_multipath_use_neigh", 1)
1228 def fix_host_limits():
1229 """Increase system limits."""
1231 rlimit_atleast(resource
.RLIMIT_NPROC
, 8 * 1024)
1232 rlimit_atleast(resource
.RLIMIT_NOFILE
, 16 * 1024)
1233 sysctl_atleast(None, "fs.file-max", 16 * 1024)
1234 sysctl_atleast(None, "kernel.pty.max", 16 * 1024)
1237 # Original on ubuntu 17.x, but apport won't save as in namespace
1238 # |/usr/share/apport/apport %p %s %c %d %P
1239 sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
1240 sysctl_assure(None, "kernel.core_uses_pid", 1)
1241 sysctl_assure(None, "fs.suid_dumpable", 1)
1243 # Maximum connection backlog
1244 sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
1246 # Maximum read and write socket buffer sizes
1247 sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
1248 sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
1250 # Garbage Collection Settings for ARP and Neighbors
1251 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
1252 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
1253 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
1254 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
1255 # Hold entries for 10 minutes
1256 sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1257 sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1260 sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)
1263 sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)
1265 # Increase routing table size to 128K
1266 sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
1267 sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
1270 def setup_node_tmpdir(logdir
, name
):
1271 # Cleanup old log, valgrind, and core files.
1272 subprocess
.check_call(
1273 "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir
, name
), shell
=True
1276 # Setup the per node directory.
1277 nodelogdir
= "{}/{}".format(logdir
, name
)
1278 subprocess
.check_call(
1279 "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir
), shell
=True
1281 logfile
= "{0}/{1}.log".format(logdir
, name
)
1286 "A Node with IPv4/IPv6 forwarding enabled"
1288 def __init__(self
, name
, **params
):
1290 # Backward compatibility:
1291 # Load configuration defaults like topogen.
1292 self
.config_defaults
= configparser
.ConfigParser(
1294 "verbosity": "info",
1295 "frrdir": "/usr/lib/frr",
1296 "routertype": "frr",
1301 self
.config_defaults
.read(
1302 os
.path
.join(os
.path
.dirname(os
.path
.realpath(__file__
)), "../pytest.ini")
1305 # If this topology is using old API and doesn't have logdir
1306 # specified, then attempt to generate an unique logdir.
1307 self
.logdir
= params
.get("logdir")
1308 if self
.logdir
is None:
1309 self
.logdir
= get_logs_path(g_extra_config
["rundir"])
1311 if not params
.get("logger"):
1312 # If logger is present topogen has already set this up
1313 logfile
= setup_node_tmpdir(self
.logdir
, name
)
1314 l
= topolog
.get_logger(name
, log_level
="debug", target
=logfile
)
1315 params
["logger"] = l
1317 super(Router
, self
).__init
__(name
, **params
)
1319 self
.daemondir
= None
1320 self
.hasmpls
= False
1321 self
.routertype
= "frr"
1322 self
.unified_config
= None
1343 self
.daemons_options
= {"zebra": ""}
1344 self
.reportCores
= True
1347 self
.ns_cmd
= "sudo nsenter -a -t {} ".format(self
.pid
)
1349 # Allow escaping from running inside docker
1350 cgroup
= open("/proc/1/cgroup").read()
1351 m
= re
.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup
)
1353 self
.ns_cmd
= "docker exec -it {} ".format(m
.group(1)) + self
.ns_cmd
1357 logger
.debug("CMD to enter {}: {}".format(self
.name
, self
.ns_cmd
))
1359 def _config_frr(self
, **params
):
1360 "Configure FRR binaries"
1361 self
.daemondir
= params
.get("frrdir")
1362 if self
.daemondir
is None:
1363 self
.daemondir
= self
.config_defaults
.get("topogen", "frrdir")
1365 zebra_path
= os
.path
.join(self
.daemondir
, "zebra")
1366 if not os
.path
.isfile(zebra_path
):
1367 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path
))
1369 # pylint: disable=W0221
1370 # Some params are only meaningful for the parent class.
1371 def config(self
, **params
):
1372 super(Router
, self
).config(**params
)
1374 # User did not specify the daemons directory, try to autodetect it.
1375 self
.daemondir
= params
.get("daemondir")
1376 if self
.daemondir
is None:
1377 self
.routertype
= params
.get(
1378 "routertype", self
.config_defaults
.get("topogen", "routertype")
1380 self
._config
_frr
(**params
)
1382 # Test the provided path
1383 zpath
= os
.path
.join(self
.daemondir
, "zebra")
1384 if not os
.path
.isfile(zpath
):
1385 raise Exception("No zebra binary found in {}".format(zpath
))
1386 # Allow user to specify routertype when the path was specified.
1387 if params
.get("routertype") is not None:
1388 self
.routertype
= params
.get("routertype")
1390 # Set ownership of config files
1391 self
.cmd("chown {0}:{0}vty /etc/{0}".format(self
.routertype
))
1393 def terminate(self
):
1394 # Stop running FRR daemons
1396 super(Router
, self
).terminate()
1397 os
.system("chmod -R go+rw " + self
.logdir
)
1399 # Return count of running daemons
1400 def listDaemons(self
):
1402 rc
, stdout
, _
= self
.cmd_status(
1403 "ls -1 /var/run/%s/*.pid" % self
.routertype
, warn
=False
1407 for d
in stdout
.strip().split("\n"):
1410 pid
= int(self
.cmd_raises("cat %s" % pidfile
, warn
=False).strip())
1411 name
= os
.path
.basename(pidfile
[:-4])
1413 # probably not compatible with bsd.
1414 rc
, _
, _
= self
.cmd_status("test -d /proc/{}".format(pid
), warn
=False)
1417 "%s: %s exited leaving pidfile %s (%s)",
1423 self
.cmd("rm -- " + pidfile
)
1425 ret
.append((name
, pid
))
1426 except (subprocess
.CalledProcessError
, ValueError):
    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        Sends SIGTERM to every running daemon, polls (up to 30 iterations)
        for them to exit, then sends SIGBUS to any stragglers so they
        leave a core file, and finally checks for cores/errors.  Errors
        are ignored for FRR versions older than ``minErrorVersion``; when
        ``assertOnError`` is True any remaining errors trigger an assert.

        NOTE(review): this extract appears to be missing several
        statements (sleep/logging wrappers, branch headers, return) —
        confirm against the full file.
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)

        running = self.listDaemons()
        for _ in range(0, 30):
                "{}: waiting for daemons stopping: {}".format(
                    self.name, ", ".join([x[0] for x in running])
            running = self.listDaemons()

            # Force remaining daemons to core so failures are diagnosable.
                "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
            for name, pid in running:
                pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
                logger.info("%s: killing %s", self.name, name)
                self.cmd("kill -SIGBUS %d" % pid)
                self.cmd("rm -- " + pidfile)

                0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
        if assertOnError and (errors is not None) and len(errors) > 0:
            assert "Errors found - details follow:" == 0, errors
1483 def removeIPs(self
):
1484 for interface
in self
.intfNames():
1486 self
.intf_ip_cmd(interface
, "ip address flush " + interface
)
1487 except Exception as ex
:
1488 logger
.error("%s can't remove IPs %s", self
, str(ex
))
1490 # assert False, "can't remove IPs %s" % str(ex)
    def checkCapability(self, daemon, param):
        """Check whether *daemon* advertises support for option *param*.

        Runs ``<daemon> -h`` and greps its help output for the option name
        (with dashes stripped).

        NOTE(review): this extract appears to be missing the
        ``self.cmd(...)`` call that captures ``output`` and the boolean
        ``return`` statements — confirm against the full file.
        """
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            # Dashes are stripped because the help text lists bare names.
            daemon_search_option = param.replace("-", "")
                "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
            if daemon_search_option not in output:
    def loadConf(self, daemon, source=None, param=None):
        """Enabled and set config for a daemon.

        Arranges for loading of daemon configuration from the specified source. Possible
        `source` values are `None` for an empty config file, a path name which is used
        directly, or a file name with no path components which is first looked for
        directly and then looked for under a sub-directory named after router.

        NOTE(review): this extract appears to be missing several branch
        headers (if/else around the unified-config handling and the final
        "unknown daemon" log) — confirm against the full file.
        """

        # Unfortunately this API allows for source to not exist for any and all routers.
        head, tail = os.path.split(source)
        if not head and not self.path_exists(tail):
            # Fall back to a per-router subdirectory of the test script dir.
            script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
            router_relative = os.path.join(script_dir, self.name, tail)
            if self.path_exists(router_relative):
                source = router_relative
                    "using router relative configuration: {}".format(source)

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys() or daemon == "frr":
            # "frr" selects a single unified config file for all daemons.
                self.unified_config = 1
                self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                # No usable source: start from an empty config file.
                if daemon == "frr" or not self.unified_config:
                    self.cmd_raises("rm -f " + conf_file)
                    self.cmd_raises("touch " + conf_file)
                self.cmd_raises("cp {} {}".format(source, conf_file))

            if not self.unified_config or daemon == "frr":
                self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
                self.cmd_raises("chmod 664 {}".format(conf_file))

            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                staticd_path = os.path.join(self.daemondir, "staticd")
                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons
1564 def runInWindow(self
, cmd
, title
=None):
1565 return self
.run_in_window(cmd
, title
)
    def startRouter(self, tgen=None):
        """Start the FRR daemons configured on this router.

        Verifies preconditions for feature-specific daemons (ldpd needs
        kernel >= 4.5 and the MPLS modules; eigrpd/bfdd must be compiled)
        and returns a skip/error message string when one is not met.
        Optionally opens shell/vtysh windows per ``g_extra_config``.

        NOTE(review): this extract appears to be missing several
        statements (``self.cmd(...)``/``logger`` wrappers, branch headers,
        and the final return of ``status``) — confirm against the full
        file.
        """
        if self.unified_config:
                'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
            # Disable integrated-vtysh-config
                'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'

            "chown %s:%svty /etc/%s/vtysh.conf"
            % (self.routertype, self.routertype, self.routertype)

        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf

        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons["ldpd"] == 1:
            ldpd_path = os.path.join(self.daemondir, "ldpd")
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), "4.5") < 0:
                logger.info("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"
            # Check if have mpls
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                        "LDP/MPLS Tests will be skipped, platform missing module(s)"
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if not module_present("mpls-router"):
                        "MPLS tests will not run (missing mpls-router kernel module)"
                elif not module_present("mpls-iptunnel"):
                        "MPLS tests will not run (missing mpls-iptunnel kernel module)"
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"

        # Really want to use sysctl_atleast here, but only when MPLS is actually being
        self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

        shell_routers = g_extra_config["shell"]
        if "all" in shell_routers or self.name in shell_routers:
            self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)

        if self.daemons["eigrpd"] == 1:
            eigrpd_path = os.path.join(self.daemondir, "eigrpd")
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons["bfdd"] == 1:
            bfdd_path = os.path.join(self.daemondir, "bfdd")
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        status = self.startRouterDaemons(tgen=tgen)

        vtysh_routers = g_extra_config["vtysh"]
        if "all" in vtysh_routers or self.name in vtysh_routers:
            self.run_in_window("vtysh", title="vt-%s" % self.name)

        if self.unified_config:
            self.cmd("vtysh -f /etc/frr/frr.conf")
1656 def getStdErr(self
, daemon
):
1657 return self
.getLog("err", daemon
)
1659 def getStdOut(self
, daemon
):
1660 return self
.getLog("out", daemon
)
1662 def getLog(self
, log
, daemon
):
1663 return self
.cmd("cat {}/{}/{}.{}".format(self
.logdir
, self
.name
, daemon
, log
))
    def startRouterDaemons(self, daemons=None, tgen=None):
        """Starts FRR daemons for this router.

        Starts either the explicitly requested ``daemons`` or every daemon
        enabled in ``self.daemons``.  zebra, staticd and snmpd are started
        first (in that order), then the rest.  Supports wrapping daemons
        in gdb windows, valgrind, or strace per ``g_extra_config``.

        NOTE(review): this extract appears to be missing several
        statements (else branches, ``logger``/``self.cmd`` wrappers,
        ``try:`` headers, sleeps and the final return) — confirm against
        the full file.
        """
        # Runtime debug/instrumentation configuration for this run.
        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
                "cat > /etc/frr/support_bundle_commands.conf",

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            logger.info("{}: running version: {}".format(self.name, self.version))

        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        if daemons is not None:
            daemons_list = daemons
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        def start_daemon(daemon, extra_opts=None):
            """Launch a single daemon (optionally under gdb/valgrind/strace)."""
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                # snmpd is a system binary, not an FRR build artifact.
                binary = "/usr/sbin/snmpd"
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
                binary = os.path.join(self.daemondir, daemon)

                cmdenv = "ASAN_OPTIONS="
                    cmdenv = "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                        " --gen-suppressions=all --expensive-definedness-checks=yes"
                elif daemon in strace_daemons or "all" in strace_daemons:
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name

            cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
                cmdopt += " " + extra_opts

                (gdb_routers or gdb_daemons)
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
                if daemon == "snmpd":
                gdbcmd = "sudo -E gdb " + binary
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                if daemon != "snmpd":
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                            '\n:stdout: "{}"'.format(error.stdout.strip())
                            '\n:stderr: "{}"'.format(error.stderr.strip())
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zebra a chance to configure interface addresses that snmpd daemon
            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        # Fix Link-Local Addresses on initial startup
        # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
        _, output, _ = self.cmd_status(
            "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
            stderr=subprocess.STDOUT,
        logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))
    def killRouterDaemons(
        self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
        """Kill the given daemons with SIGKILL and clean up their pidfiles.

        Performs up to two kill rounds, optionally waiting for the pids to
        disappear, then checks for cores; errors assert when
        ``assertOnError`` is True and the FRR version is at least
        ``minErrorVersion``.

        NOTE(review): this extract appears to be missing several
        statements (loop headers over ``dmns``, ``logger``/sleep calls and
        branch else-arms) — confirm against the full file.
        """
        # Daemons(user specified daemon only) using SIGKILL
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        daemonsNotRunning = []
        if re.search(r"No such file or directory", rundaemons):
            for daemon in daemons:
                if rundaemons is not None and daemon in rundaemons:
                    dmns = rundaemons.split("\n")
                    # Exclude empty string at end of list
                        if re.search(r"%s" % daemon, d):
                            daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                            if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                                    "{}: killing {}".format(
                                        os.path.basename(d.rstrip().rsplit(".", 1)[0]),
                                self.cmd("kill -9 %s" % daemonpid)
                                if pid_exists(int(daemonpid)):
                    while wait and numRunning > 0:
                                "{}: waiting for {} daemon to be stopped".format(

                            # 2nd round of kill if daemons didn't exit
                                    if re.search(r"%s" % daemon, d):
                                        daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                                        if daemonpid.isdigit() and pid_exists(
                                                "{}: killing {}".format(
                                                    d.rstrip().rsplit(".", 1)[0]
                                            self.cmd("kill -9 %s" % daemonpid)
                                        if daemonpid.isdigit() and not pid_exists(
                                            self.cmd("rm -- {}".format(d.rstrip()))
                        errors = self.checkRouterCores(reportOnce=True)
                        if self.checkRouterVersion("<", minErrorVersion):
                            # ignore errors in old versions
                        if assertOnError and len(errors) > 0:
                            assert "Errors found - details follow:" == 0, errors
                    daemonsNotRunning.append(daemon)
        if len(daemonsNotRunning) > 0:
            errors = errors + "Daemons are not running", daemonsNotRunning
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """Scan every enabled daemon for core files, memory leaks and
        AddressSanitizer kills; return accumulated trace text.

        With ``reportOnce`` the scan is performed at most once per run
        (``self.reportCores`` gates it and is cleared at the end).

        NOTE(review): this extract appears to be missing several
        statements (``traces`` initialization, ``sys.stderr.write``
        wrappers and the final return) — confirm against the full file.
        """
        if reportOnce and not self.reportCores:
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                if len(corefiles) > 0:
                    backtrace = gdb_core(self, daemon, corefiles)
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)

                    # No core: check stderr for memstats-reported leaks.
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                        log = re.sub("core_handler: ", "", log)
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        log = re.sub("memstats: ", "  ", log)
                        sys.stderr.write(log)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
        if reportOnce:
            self.reportCores = False
    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo they don't run"

        # Use vtysh's per-daemon logging banner to detect which daemons
        # are alive.
        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if daemon == "snmpd":
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                        "You may have a copy of staticd installed but are attempting to test against\n"
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                        log_tail = subprocess.check_output(
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                return "%s: Daemon %s not running" % (self.name, daemon)
    def checkRouterVersion(self, cmpop, version):
        """
        Compares router version using operation `cmpop` with `version`.
        Valid `cmpop` values:
        * `>=`: has the same version or greater
        * '>': has greater version
        * '=': has the same version
        * '<': has a lesser version
        * '<=': has the same version or lesser

        Usage example: router.checkRouterVersion('>', '1.0')

        NOTE(review): this extract appears to be missing the tail of this
        method (the mapping from ``result`` of ``version_cmp`` to the
        boolean return for each operator) — confirm against the full file.
        """

        # Make sure we have version information first
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            logger.info("{}: running version: {}".format(self.name, self.version))

        rversion = self.version
        if rversion == None:

        result = version_cmp(rversion, version)
    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"

        # Parse `ip -6 address` output: interface header lines give the
        # name, "inet6 fe80::... scope link" lines give the address(es).
        # NOTE(review): this extract appears to be missing the result-list
        # initialization, the loop header over lines, the second regex
        # search call, and the final return — confirm against the full
        # file.
        ifaces = self.cmd("ip -6 address")
        # Fix newlines (make them all the same)
        ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
            m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
                interface = m.group(1)
                "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
                # Interfaces can carry more than one link-local address;
                # disambiguate them with a numeric suffix.
                ll_per_if_count += 1
                if ll_per_if_count > 1:
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                    linklocal += [[interface, local]]
    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

        # NOTE(review): this extract appears to be missing the boolean
        # return statements of this method — confirm against the full
        # file.
        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
        if daemon == "ldpd":
            # ldpd additionally requires kernel >= 4.5 with MPLS modules.
            if version_cmp(platform.release(), "4.5") < 0:
            if not module_present("mpls-router", load=False):
            if not module_present("mpls-iptunnel", load=False):
2120 def get_routertype(self
):
2121 "Return the type of Router (frr)"
2123 return self
.routertype
    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        # Output file name: <prefix><testscript minus ".py">.txt
        # NOTE(review): this extract appears to be missing several
        # statements (``leaks_found`` tracking, the header-write branch,
        # regex substitution calls and file close) — confirm against the
        # full file.
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    # Check if file already exists
                    fileexists = os.path.isfile(filename)
                    leakfile = open(filename, "a")
                        # New file - add header
                            "# Memory Leak Detection for topotest %s\n\n"
                    leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                    log = re.sub("memstats:  ", "    ", log)
                    leakfile.write("\n")
2165 """Convert string to unicode, depending on python version"""
2166 if sys
.version_info
[0] > 2:
2169 return unicode(s
) # pylint: disable=E0602
2173 return isinstance(o
, Mapping
)