5 # Library of helper functions for NetDEF Topology Tests
7 # Copyright (c) 2016 by
8 # Network Device Education Foundation, Inc. ("NetDEF")
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
40 from copy
import deepcopy
42 import lib
.topolog
as topolog
43 from lib
.topolog
import logger
45 if sys
.version_info
[0] > 2:
47 from collections
.abc
import Mapping
49 import ConfigParser
as configparser
50 from collections
import Mapping
52 from lib
import micronet
53 from lib
.micronet_compat
import Node
def get_logs_path(rundir):
    """Return the log directory for the current test, rooted at `rundir`."""
    return os.path.join(rundir, topolog.get_test_logdir())
63 def gdb_core(obj
, daemon
, corefiles
):
79 gdbcmds
= [["-ex", i
.strip()] for i
in gdbcmds
.strip().split("\n")]
80 gdbcmds
= [item
for sl
in gdbcmds
for item
in sl
]
82 daemon_path
= os
.path
.join(obj
.daemondir
, daemon
)
83 backtrace
= subprocess
.check_output(
84 ["gdb", daemon_path
, corefiles
[0], "--batch"] + gdbcmds
87 "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj
.name
, daemon
)
89 sys
.stderr
.write("%s" % backtrace
)
93 class json_cmp_result(object):
94 "json_cmp result class for better assertion messages"
99 def add_error(self
, error
):
100 "Append error message to the result"
101 for line
in error
.splitlines():
102 self
.errors
.append(line
)
104 def has_errors(self
):
105 "Returns True if there were errors, otherwise False."
106 return len(self
.errors
) > 0
108 def gen_report(self
):
109 headline
= ["Generated JSON diff error report:", ""]
110 return headline
+ self
.errors
114 "Generated JSON diff error report:\n\n\n" + "\n".join(self
.errors
) + "\n\n"
118 def gen_json_diff_report(d1
, d2
, exact
=False, path
="> $", acc
=(0, "")):
120 Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.
124 if isinstance(v
, (dict, list)):
125 return "\t" + "\t".join(
126 json
.dumps(v
, indent
=4, separators
=(",", ": ")).splitlines(True)
129 return "'{}'".format(v
)
132 if isinstance(v
, (list, tuple)):
134 elif isinstance(v
, dict):
136 elif isinstance(v
, (int, float)):
138 elif isinstance(v
, bool):
140 elif isinstance(v
, str):
145 def get_errors(other_acc
):
148 def get_errors_n(other_acc
):
151 def add_error(acc
, msg
, points
=1):
152 return (acc
[0] + points
, acc
[1] + "{}: {}\n".format(path
, msg
))
154 def merge_errors(acc
, other_acc
):
155 return (acc
[0] + other_acc
[0], acc
[1] + other_acc
[1])
158 return "{}[{}]".format(path
, idx
)
161 return "{}->{}".format(path
, key
)
163 def has_errors(other_acc
):
164 return other_acc
[0] > 0
167 not isinstance(d1
, (list, dict))
168 and not isinstance(d2
, (list, dict))
173 not isinstance(d1
, (list, dict))
174 and not isinstance(d2
, (list, dict))
179 "d1 has element with value '{}' but in d2 it has value '{}'".format(d1
, d2
),
183 and isinstance(d2
, list)
184 and ((len(d2
) > 0 and d2
[0] == "__ordered__") or exact
)
188 if len(d1
) != len(d2
):
191 "d1 has Array of length {} but in d2 it is of length {}".format(
196 for idx
, v1
, v2
in zip(range(0, len(d1
)), d1
, d2
):
198 acc
, gen_json_diff_report(v1
, v2
, exact
=exact
, path
=add_idx(idx
))
200 elif isinstance(d1
, list) and isinstance(d2
, list):
201 if len(d1
) < len(d2
):
204 "d1 has Array of length {} but in d2 it is of length {}".format(
209 for idx2
, v2
in zip(range(0, len(d2
)), d2
):
213 for idx1
, v1
in zip(range(0, len(d1
)), d1
):
214 tmp_v1
= deepcopy(v1
)
215 tmp_v2
= deepcopy(v2
)
216 tmp_diff
= gen_json_diff_report(tmp_v1
, tmp_v2
, path
=add_idx(idx1
))
217 if not has_errors(tmp_diff
):
221 elif not closest_diff
or get_errors_n(tmp_diff
) < get_errors_n(
224 closest_diff
= tmp_diff
226 if not found_match
and isinstance(v2
, (list, dict)):
227 sub_error
= "\n\n\t{}".format(
228 "\t".join(get_errors(closest_diff
).splitlines(True))
233 "d2 has the following element at index {} which is not present in d1: "
234 + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
235 ).format(idx2
, dump_json(v2
), closest_idx
, sub_error
),
237 if not found_match
and not isinstance(v2
, (list, dict)):
240 "d2 has the following element at index {} which is not present in d1: {}".format(
244 elif isinstance(d1
, dict) and isinstance(d2
, dict) and exact
:
245 invalid_keys_d1
= [k
for k
in d1
.keys() if k
not in d2
.keys()]
246 invalid_keys_d2
= [k
for k
in d2
.keys() if k
not in d1
.keys()]
247 for k
in invalid_keys_d1
:
248 acc
= add_error(acc
, "d1 has key '{}' which is not present in d2".format(k
))
249 for k
in invalid_keys_d2
:
250 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
251 valid_keys_intersection
= [k
for k
in d1
.keys() if k
in d2
.keys()]
252 for k
in valid_keys_intersection
:
254 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
256 elif isinstance(d1
, dict) and isinstance(d2
, dict):
257 none_keys
= [k
for k
, v
in d2
.items() if v
== None]
258 none_keys_present
= [k
for k
in d1
.keys() if k
in none_keys
]
259 for k
in none_keys_present
:
261 acc
, "d1 has key '{}' which is not supposed to be present".format(k
)
263 keys
= [k
for k
, v
in d2
.items() if v
!= None]
264 invalid_keys_intersection
= [k
for k
in keys
if k
not in d1
.keys()]
265 for k
in invalid_keys_intersection
:
266 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
267 valid_keys_intersection
= [k
for k
in keys
if k
in d1
.keys()]
268 for k
in valid_keys_intersection
:
270 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
275 "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
276 json_type(d1
), json_type(d2
)
284 def json_cmp(d1
, d2
, exact
=False):
286 JSON compare function. Receives two parameters:
287 * `d1`: parsed JSON data structure
288 * `d2`: parsed JSON data structure
290 Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
291 in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
292 error report is generated and wrapped in a 'json_cmp_result()'. There are special
293 parameters and notations explained below which can be used to cover rather unusual
296 * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
297 order within JSON Arrays)
298 * using 'null' (or 'None' in Python) as JSON Object value is checking for key
300 * using '*' as JSON Object value or Array value is checking for presence in d1
301 without checking the values
302 * using '__ordered__' as first element in a JSON Array in d2 will also check the
303 order when it is compared to an Array in d1
306 (errors_n
, errors
) = gen_json_diff_report(deepcopy(d1
), deepcopy(d2
), exact
=exact
)
309 result
= json_cmp_result()
310 result
.add_error(errors
)
316 def router_output_cmp(router
, cmd
, expected
):
318 Runs `cmd` in router and compares the output with `expected`.
321 normalize_text(router
.vtysh_cmd(cmd
)),
322 normalize_text(expected
),
323 title1
="Current output",
324 title2
="Expected output",
def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    # vtysh parses the command output into Python structures for us.
    parsed = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(parsed, data, exact)
336 def run_and_expect(func
, what
, count
=20, wait
=3):
338 Run `func` and compare the result with `what`. Do it for `count` times
339 waiting `wait` seconds between tries. By default it tries 20 times with
340 3 seconds delay between tries.
342 Returns (True, func-return) on success or
343 (False, func-return) on failure.
347 Helper functions to use with this function:
351 start_time
= time
.time()
352 func_name
= "<unknown>"
353 if func
.__class
__ == functools
.partial
:
354 func_name
= func
.func
.__name
__
356 func_name
= func
.__name
__
359 "'{}' polling started (interval {} secs, maximum {} tries)".format(
360 func_name
, wait
, count
371 end_time
= time
.time()
373 "'{}' succeeded after {:.2f} seconds".format(
374 func_name
, end_time
- start_time
377 return (True, result
)
379 end_time
= time
.time()
381 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
383 return (False, result
)
386 def run_and_expect_type(func
, etype
, count
=20, wait
=3, avalue
=None):
388 Run `func` and compare the result with `etype`. Do it for `count` times
389 waiting `wait` seconds between tries. By default it tries 20 times with
390 3 seconds delay between tries.
392 This function is used when you want to test the return type and,
393 optionally, the return value.
395 Returns (True, func-return) on success or
396 (False, func-return) on failure.
398 start_time
= time
.time()
399 func_name
= "<unknown>"
400 if func
.__class
__ == functools
.partial
:
401 func_name
= func
.func
.__name
__
403 func_name
= func
.__name
__
406 "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
407 func_name
, wait
, int(wait
* count
)
413 if not isinstance(result
, etype
):
415 "Expected result type '{}' got '{}' instead".format(etype
, type(result
))
421 if etype
!= type(None) and avalue
!= None and result
!= avalue
:
422 logger
.debug("Expected value '{}' got '{}' instead".format(avalue
, result
))
427 end_time
= time
.time()
429 "'{}' succeeded after {:.2f} seconds".format(
430 func_name
, end_time
- start_time
433 return (True, result
)
435 end_time
= time
.time()
437 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
439 return (False, result
)
442 def router_json_cmp_retry(router
, cmd
, data
, exact
=False, retry_timeout
=10.0):
444 Runs `cmd` that returns JSON data (normally the command ends with 'json')
445 and compare with `data` contents. Retry by default for 10 seconds
449 return router_json_cmp(router
, cmd
, data
, exact
)
451 ok
, _
= run_and_expect(test_func
, None, int(retry_timeout
), 1)
456 "Converting Integer to DPID"
460 dpid
= "0" * (16 - len(dpid
)) + dpid
464 "Unable to derive default datapath ID - "
465 "please either specify a dpid or use a "
466 "canonical switch name such as s23."
471 "Check whether pid exists in the current process table."
476 os
.waitpid(pid
, os
.WNOHANG
)
481 except OSError as err
:
482 if err
.errno
== errno
.ESRCH
:
483 # ESRCH == No such process
485 elif err
.errno
== errno
.EPERM
:
486 # EPERM clearly means there's a process to deny access to
489 # According to "man 2 kill" possible error values are
490 # (EINVAL, EPERM, ESRCH)
496 def get_textdiff(text1
, text2
, title1
="", title2
="", **opts
):
497 "Returns empty string if same or formatted diff"
500 difflib
.unified_diff(text1
, text2
, fromfile
=title1
, tofile
=title2
, **opts
)
502 # Clean up line endings
503 diff
= os
.linesep
.join([s
for s
in diff
.splitlines() if s
])
def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."

    def _keepends(blob):
        # Normalize the blob to end in exactly one newline, then split
        # keeping line endings — the form difflib-based diffing expects.
        return ("\n".join(blob.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(_keepends(text1), _keepends(text2), title1, title2, **opts)
514 def get_file(content
):
516 Generates a temporary file in '/tmp' with `content` and returns the file name.
518 if isinstance(content
, list) or isinstance(content
, tuple):
519 content
= "\n".join(content
)
520 fde
= tempfile
.NamedTemporaryFile(mode
="w", delete
=False)
527 def normalize_text(text
):
529 Strips formating spaces/tabs, carriage returns and trailing whitespace.
531 text
= re
.sub(r
"[ \t]+", " ", text
)
532 text
= re
.sub(r
"\r", "", text
)
534 # Remove whitespace in the middle of text.
535 text
= re
.sub(r
"[ \t]+\n", "\n", text
)
536 # Remove whitespace at the end of the text.
544 Parses unix name output to check if running on GNU/Linux.
546 Returns True if running on Linux, returns False otherwise.
549 if os
.uname()[0] == "Linux":
554 def iproute2_is_vrf_capable():
556 Checks if the iproute2 version installed on the system is capable of
557 handling VRFs by interpreting the output of the 'ip' utility found in PATH.
559 Returns True if capability can be detected, returns False otherwise.
564 subp
= subprocess
.Popen(
565 ["ip", "route", "show", "vrf"],
566 stdout
=subprocess
.PIPE
,
567 stderr
=subprocess
.PIPE
,
568 stdin
=subprocess
.PIPE
,
570 iproute2_err
= subp
.communicate()[1].splitlines()[0].split()[0]
572 if iproute2_err
!= "Error:":
579 def module_present_linux(module
, load
):
581 Returns whether `module` is present.
583 If `load` is true, it will try to load it via modprobe.
585 with
open("/proc/modules", "r") as modules_file
:
586 if module
.replace("-", "_") in modules_file
.read():
588 cmd
= "/sbin/modprobe {}{}".format("" if load
else "-n ", module
)
589 if os
.system(cmd
) != 0:
595 def module_present_freebsd(module
, load
):
def module_present(module, load=True):
    """Check (and by default try to load) a kernel `module`.

    Dispatches to the Linux or FreeBSD helper depending on sys.platform;
    falls through (returning None) on any other platform.
    """
    plat = sys.platform
    if plat.startswith("linux"):
        return module_present_linux(module, load)
    if plat.startswith("freebsd"):
        return module_present_freebsd(module, load)
606 def version_cmp(v1
, v2
):
608 Compare two version strings and returns:
610 * `-1`: if `v1` is less than `v2`
611 * `0`: if `v1` is equal to `v2`
612 * `1`: if `v1` is greater than `v2`
614 Raises `ValueError` if versions are not well formated.
616 vregex
= r
"(?P<whole>\d+(\.(\d+))*)"
617 v1m
= re
.match(vregex
, v1
)
618 v2m
= re
.match(vregex
, v2
)
619 if v1m
is None or v2m
is None:
620 raise ValueError("got a invalid version string")
623 v1g
= v1m
.group("whole").split(".")
624 v2g
= v2m
.group("whole").split(".")
626 # Get the longest version string
631 # Reverse list because we are going to pop the tail
634 for _
in range(vnum
):
662 def interface_set_status(node
, ifacename
, ifaceaction
=False, vrf_name
=None):
664 str_ifaceaction
= "no shutdown"
666 str_ifaceaction
= "shutdown"
668 cmd
= 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
669 ifacename
, str_ifaceaction
673 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
674 ifacename
, vrf_name
, str_ifaceaction
680 def ip4_route_zebra(node
, vrf_name
=None):
682 Gets an output of 'show ip route' command. It can be used
683 with comparing the output to a reference
686 tmp
= node
.vtysh_cmd("show ip route")
688 tmp
= node
.vtysh_cmd("show ip route vrf {0}".format(vrf_name
))
689 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
691 lines
= output
.splitlines()
693 while lines
and (not lines
[0].strip() or not header_found
):
694 if "o - offload failure" in lines
[0]:
697 return "\n".join(lines
)
700 def ip6_route_zebra(node
, vrf_name
=None):
702 Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
703 canonicalizes it by eliding link-locals.
707 tmp
= node
.vtysh_cmd("show ipv6 route")
709 tmp
= node
.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name
))
712 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
714 # Mask out the link-local addresses
715 output
= re
.sub(r
"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output
)
717 lines
= output
.splitlines()
719 while lines
and (not lines
[0].strip() or not header_found
):
720 if "o - offload failure" in lines
[0]:
724 return "\n".join(lines
)
727 def proto_name_to_number(protocol
):
743 ) # default return same as input
748 Gets a structured return of the command 'ip route'. It can be used in
749 conjunction with json_cmp() to provide accurate assert explanations.
764 output
= normalize_text(node
.run("ip route")).splitlines()
767 columns
= line
.split(" ")
768 route
= result
[columns
[0]] = {}
770 for column
in columns
:
772 route
["dev"] = column
774 route
["via"] = column
776 # translate protocol names back to numbers
777 route
["proto"] = proto_name_to_number(column
)
779 route
["metric"] = column
781 route
["scope"] = column
787 def ip4_vrf_route(node
):
789 Gets a structured return of the command 'ip route show vrf {0}-cust1'.
790 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
805 output
= normalize_text(
806 node
.run("ip route show vrf {0}-cust1".format(node
.name
))
811 columns
= line
.split(" ")
812 route
= result
[columns
[0]] = {}
814 for column
in columns
:
816 route
["dev"] = column
818 route
["via"] = column
820 # translate protocol names back to numbers
821 route
["proto"] = proto_name_to_number(column
)
823 route
["metric"] = column
825 route
["scope"] = column
833 Gets a structured return of the command 'ip -6 route'. It can be used in
834 conjunction with json_cmp() to provide accurate assert explanations.
848 output
= normalize_text(node
.run("ip -6 route")).splitlines()
851 columns
= line
.split(" ")
852 route
= result
[columns
[0]] = {}
854 for column
in columns
:
856 route
["dev"] = column
858 route
["via"] = column
860 # translate protocol names back to numbers
861 route
["proto"] = proto_name_to_number(column
)
863 route
["metric"] = column
865 route
["pref"] = column
871 def ip6_vrf_route(node
):
873 Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
874 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
888 output
= normalize_text(
889 node
.run("ip -6 route show vrf {0}-cust1".format(node
.name
))
893 columns
= line
.split(" ")
894 route
= result
[columns
[0]] = {}
896 for column
in columns
:
898 route
["dev"] = column
900 route
["via"] = column
902 # translate protocol names back to numbers
903 route
["proto"] = proto_name_to_number(column
)
905 route
["metric"] = column
907 route
["pref"] = column
915 Gets a structured return of the command 'ip rule'. It can be used in
916 conjunction with json_cmp() to provide accurate assert explanations.
932 "from": "1.2.0.0/16",
937 output
= normalize_text(node
.run("ip rule")).splitlines()
940 columns
= line
.split(" ")
943 # remove last character, since it is ':'
944 pref
= columns
[0][:-1]
947 for column
in columns
:
949 route
["from"] = column
953 route
["proto"] = column
955 route
["iif"] = column
957 route
["fwmark"] = column
964 def sleep(amount
, reason
=None):
966 Sleep wrapper that registers in the log the amount of sleep
969 logger
.info("Sleeping for {} seconds".format(amount
))
971 logger
.info(reason
+ " ({} seconds)".format(amount
))
976 def checkAddressSanitizerError(output
, router
, component
, logdir
=""):
977 "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
979 def processAddressSanitizerError(asanErrorRe
, output
, router
, component
):
981 "%s: %s triggered an exception by AddressSanitizer\n" % (router
, component
)
983 # Sanitizer Error found in log
984 pidMark
= asanErrorRe
.group(1)
985 addressSanitizerLog
= re
.search(
986 "%s(.*)%s" % (pidMark
, pidMark
), output
, re
.DOTALL
988 if addressSanitizerLog
:
989 # Find Calling Test. Could be multiple steps back
990 testframe
= sys
._current
_frames
().values()[0]
993 test
= os
.path
.splitext(
994 os
.path
.basename(testframe
.f_globals
["__file__"])
996 if (test
!= "topotest") and (test
!= "topogen"):
997 # Found the calling test
998 callingTest
= os
.path
.basename(testframe
.f_globals
["__file__"])
1001 testframe
= testframe
.f_back
1003 # somehow couldn't find the test script.
1004 callingTest
= "unknownTest"
1006 # Now finding Calling Procedure
1009 callingProc
= sys
._getframe
(level
).f_code
.co_name
1011 (callingProc
!= "processAddressSanitizerError")
1012 and (callingProc
!= "checkAddressSanitizerError")
1013 and (callingProc
!= "checkRouterCores")
1014 and (callingProc
!= "stopRouter")
1015 and (callingProc
!= "stop")
1016 and (callingProc
!= "stop_topology")
1017 and (callingProc
!= "checkRouterRunning")
1018 and (callingProc
!= "check_router_running")
1019 and (callingProc
!= "routers_have_failure")
1021 # Found the calling test
1025 # something wrong - couldn't found the calling test function
1026 callingProc
= "unknownProc"
1027 with
open("/tmp/AddressSanitzer.txt", "a") as addrSanFile
:
1029 "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1030 % (callingTest
, callingProc
, router
)
1033 "\n".join(addressSanitizerLog
.group(1).splitlines()) + "\n"
1035 addrSanFile
.write("## Error: %s\n\n" % asanErrorRe
.group(2))
1037 "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1038 % (callingTest
, callingProc
, router
)
1042 + "\n ".join(addressSanitizerLog
.group(1).splitlines())
1045 addrSanFile
.write("\n---------------\n")
1048 addressSanitizerError
= re
.search(
1049 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
1051 if addressSanitizerError
:
1052 processAddressSanitizerError(addressSanitizerError
, output
, router
, component
)
1055 # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
1057 filepattern
= logdir
+ "/" + router
+ "/" + component
+ ".asan.*"
1059 "Log check for %s on %s, pattern %s\n" % (component
, router
, filepattern
)
1061 for file in glob
.glob(filepattern
):
1062 with
open(file, "r") as asanErrorFile
:
1063 asanError
= asanErrorFile
.read()
1064 addressSanitizerError
= re
.search(
1065 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
1067 if addressSanitizerError
:
1068 processAddressSanitizerError(
1069 addressSanitizerError
, asanError
, router
, component
1075 def _sysctl_atleast(commander
, variable
, min_value
):
1076 if isinstance(min_value
, tuple):
1077 min_value
= list(min_value
)
1078 is_list
= isinstance(min_value
, list)
1080 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1082 cur_val
= [int(x
) for x
in sval
.split()]
1088 for i
, v
in enumerate(cur_val
):
1089 if v
< min_value
[i
]:
1094 if cur_val
< min_value
:
1098 valstr
= " ".join([str(x
) for x
in min_value
])
1100 valstr
= str(min_value
)
1101 logger
.info("Increasing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1102 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1105 def _sysctl_assure(commander
, variable
, value
):
1106 if isinstance(value
, tuple):
1108 is_list
= isinstance(value
, list)
1110 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1112 cur_val
= [int(x
) for x
in sval
.split()]
1118 for i
, v
in enumerate(cur_val
):
1124 if cur_val
!= str(value
):
1129 valstr
= " ".join([str(x
) for x
in value
])
1132 logger
.info("Changing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1133 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1136 def sysctl_atleast(commander
, variable
, min_value
, raises
=False):
1138 if commander
is None:
1139 commander
= micronet
.Commander("topotest")
1140 return _sysctl_atleast(commander
, variable
, min_value
)
1141 except subprocess
.CalledProcessError
as error
:
1143 "%s: Failed to assure sysctl min value %s = %s",
1152 def sysctl_assure(commander
, variable
, value
, raises
=False):
1154 if commander
is None:
1155 commander
= micronet
.Commander("topotest")
1156 return _sysctl_assure(commander
, variable
, value
)
1157 except subprocess
.CalledProcessError
as error
:
1159 "%s: Failed to assure sysctl value %s = %s",
1169 def rlimit_atleast(rname
, min_value
, raises
=False):
1171 cval
= resource
.getrlimit(rname
)
1173 if soft
< min_value
:
1174 nval
= (min_value
, hard
if min_value
< hard
else min_value
)
1175 logger
.info("Increasing rlimit %s from %s to %s", rname
, cval
, nval
)
1176 resource
.setrlimit(rname
, nval
)
1177 except subprocess
.CalledProcessError
as error
:
1179 "Failed to assure rlimit [%s] = %s", rname
, min_value
, exc_info
=True
1185 def fix_netns_limits(ns
):
1187 # Maximum read and write socket buffer sizes
1188 sysctl_atleast(ns
, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
1189 sysctl_atleast(ns
, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
1191 sysctl_assure(ns
, "net.ipv4.conf.all.rp_filter", 0)
1192 sysctl_assure(ns
, "net.ipv4.conf.default.rp_filter", 0)
1193 sysctl_assure(ns
, "net.ipv4.conf.lo.rp_filter", 0)
1195 sysctl_assure(ns
, "net.ipv4.conf.all.forwarding", 1)
1196 sysctl_assure(ns
, "net.ipv4.conf.default.forwarding", 1)
1198 # XXX if things fail look here as this wasn't done previously
1199 sysctl_assure(ns
, "net.ipv6.conf.all.forwarding", 1)
1200 sysctl_assure(ns
, "net.ipv6.conf.default.forwarding", 1)
1203 sysctl_assure(ns
, "net.ipv4.conf.default.arp_announce", 2)
1204 sysctl_assure(ns
, "net.ipv4.conf.default.arp_notify", 1)
1205 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1206 sysctl_assure(ns
, "net.ipv4.conf.default.arp_ignore", 0)
1207 sysctl_assure(ns
, "net.ipv4.conf.all.arp_announce", 2)
1208 sysctl_assure(ns
, "net.ipv4.conf.all.arp_notify", 1)
1209 # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
1210 sysctl_assure(ns
, "net.ipv4.conf.all.arp_ignore", 0)
1212 sysctl_assure(ns
, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)
1214 # Keep ipv6 permanent addresses on an admin down
1215 sysctl_assure(ns
, "net.ipv6.conf.all.keep_addr_on_down", 1)
1216 if version_cmp(platform
.release(), "4.20") >= 0:
1217 sysctl_assure(ns
, "net.ipv6.route.skip_notify_on_dev_down", 1)
1219 sysctl_assure(ns
, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
1220 sysctl_assure(ns
, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)
1223 sysctl_atleast(ns
, "net.ipv4.igmp_max_memberships", 1000)
1225 # Use neigh information on selection of nexthop for multipath hops
1226 sysctl_assure(ns
, "net.ipv4.fib_multipath_use_neigh", 1)
1229 def fix_host_limits():
1230 """Increase system limits."""
1232 rlimit_atleast(resource
.RLIMIT_NPROC
, 8 * 1024)
1233 rlimit_atleast(resource
.RLIMIT_NOFILE
, 16 * 1024)
1234 sysctl_atleast(None, "fs.file-max", 16 * 1024)
1235 sysctl_atleast(None, "kernel.pty.max", 16 * 1024)
1238 # Original on ubuntu 17.x, but apport won't save as in namespace
1239 # |/usr/share/apport/apport %p %s %c %d %P
1240 sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
1241 sysctl_assure(None, "kernel.core_uses_pid", 1)
1242 sysctl_assure(None, "fs.suid_dumpable", 1)
1244 # Maximum connection backlog
1245 sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
1247 # Maximum read and write socket buffer sizes
1248 sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
1249 sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
1251 # Garbage Collection Settings for ARP and Neighbors
1252 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
1253 sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
1254 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
1255 sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
1256 # Hold entries for 10 minutes
1257 sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1258 sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
1261 sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)
1264 sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)
1266 # Increase routing table size to 128K
1267 sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
1268 sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
1271 def setup_node_tmpdir(logdir
, name
):
1272 # Cleanup old log, valgrind, and core files.
1273 subprocess
.check_call(
1274 "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir
, name
), shell
=True
1277 # Setup the per node directory.
1278 nodelogdir
= "{}/{}".format(logdir
, name
)
1279 subprocess
.check_call(
1280 "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir
), shell
=True
1282 logfile
= "{0}/{1}.log".format(logdir
, name
)
1287 "A Node with IPv4/IPv6 forwarding enabled"
1289 def __init__(self
, name
, **params
):
1291 # Backward compatibility:
1292 # Load configuration defaults like topogen.
1293 self
.config_defaults
= configparser
.ConfigParser(
1295 "verbosity": "info",
1296 "frrdir": "/usr/lib/frr",
1297 "routertype": "frr",
1302 self
.config_defaults
.read(
1303 os
.path
.join(os
.path
.dirname(os
.path
.realpath(__file__
)), "../pytest.ini")
1306 # If this topology is using old API and doesn't have logdir
1307 # specified, then attempt to generate an unique logdir.
1308 self
.logdir
= params
.get("logdir")
1309 if self
.logdir
is None:
1310 self
.logdir
= get_logs_path(g_extra_config
["rundir"])
1312 if not params
.get("logger"):
1313 # If logger is present topogen has already set this up
1314 logfile
= setup_node_tmpdir(self
.logdir
, name
)
1315 l
= topolog
.get_logger(name
, log_level
="debug", target
=logfile
)
1316 params
["logger"] = l
1318 super(Router
, self
).__init
__(name
, **params
)
1320 self
.daemondir
= None
1321 self
.hasmpls
= False
1322 self
.routertype
= "frr"
1323 self
.unified_config
= None
1345 self
.daemons_options
= {"zebra": ""}
1346 self
.reportCores
= True
1349 self
.ns_cmd
= "sudo nsenter -a -t {} ".format(self
.pid
)
1351 # Allow escaping from running inside docker
1352 cgroup
= open("/proc/1/cgroup").read()
1353 m
= re
.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup
)
1355 self
.ns_cmd
= "docker exec -it {} ".format(m
.group(1)) + self
.ns_cmd
1359 logger
.debug("CMD to enter {}: {}".format(self
.name
, self
.ns_cmd
))
1361 def _config_frr(self
, **params
):
1362 "Configure FRR binaries"
1363 self
.daemondir
= params
.get("frrdir")
1364 if self
.daemondir
is None:
1365 self
.daemondir
= self
.config_defaults
.get("topogen", "frrdir")
1367 zebra_path
= os
.path
.join(self
.daemondir
, "zebra")
1368 if not os
.path
.isfile(zebra_path
):
1369 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path
))
1371 # pylint: disable=W0221
1372 # Some params are only meaningful for the parent class.
1373 def config(self
, **params
):
1374 super(Router
, self
).config(**params
)
1376 # User did not specify the daemons directory, try to autodetect it.
1377 self
.daemondir
= params
.get("daemondir")
1378 if self
.daemondir
is None:
1379 self
.routertype
= params
.get(
1380 "routertype", self
.config_defaults
.get("topogen", "routertype")
1382 self
._config
_frr
(**params
)
1384 # Test the provided path
1385 zpath
= os
.path
.join(self
.daemondir
, "zebra")
1386 if not os
.path
.isfile(zpath
):
1387 raise Exception("No zebra binary found in {}".format(zpath
))
1388 # Allow user to specify routertype when the path was specified.
1389 if params
.get("routertype") is not None:
1390 self
.routertype
= params
.get("routertype")
1392 # Set ownership of config files
1393 self
.cmd("chown {0}:{0}vty /etc/{0}".format(self
.routertype
))
1395 def terminate(self
):
1396 # Stop running FRR daemons
1398 super(Router
, self
).terminate()
1399 os
.system("chmod -R go+rw " + self
.logdir
)
1401 # Return count of running daemons
1402 def listDaemons(self
):
1404 rc
, stdout
, _
= self
.cmd_status(
1405 "ls -1 /var/run/%s/*.pid" % self
.routertype
, warn
=False
1409 for d
in stdout
.strip().split("\n"):
1412 pid
= int(self
.cmd_raises("cat %s" % pidfile
, warn
=False).strip())
1413 name
= os
.path
.basename(pidfile
[:-4])
1415 # probably not compatible with bsd.
1416 rc
, _
, _
= self
.cmd_status("test -d /proc/{}".format(pid
), warn
=False)
1419 "%s: %s exited leaving pidfile %s (%s)",
1425 self
.cmd("rm -- " + pidfile
)
1427 ret
.append((name
, pid
))
1428 except (subprocess
.CalledProcessError
, ValueError):
1432 def stopRouter(self
, assertOnError
=True, minErrorVersion
="5.1"):
1433 # Stop Running FRR Daemons
1434 running
= self
.listDaemons()
1438 logger
.info("%s: stopping %s", self
.name
, ", ".join([x
[0] for x
in running
]))
1439 for name
, pid
in running
:
1440 logger
.info("{}: sending SIGTERM to {}".format(self
.name
, name
))
1442 os
.kill(pid
, signal
.SIGTERM
)
1443 except OSError as err
:
1445 "%s: could not kill %s (%s): %s", self
.name
, name
, pid
, str(err
)
1448 running
= self
.listDaemons()
1450 for _
in range(0, 30):
1453 "{}: waiting for daemons stopping: {}".format(
1454 self
.name
, ", ".join([x
[0] for x
in running
])
1457 running
= self
.listDaemons()
1465 "%s: sending SIGBUS to: %s", self
.name
, ", ".join([x
[0] for x
in running
])
1467 for name
, pid
in running
:
1468 pidfile
= "/var/run/{}/{}.pid".format(self
.routertype
, name
)
1469 logger
.info("%s: killing %s", self
.name
, name
)
1470 self
.cmd("kill -SIGBUS %d" % pid
)
1471 self
.cmd("rm -- " + pidfile
)
1474 0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self
.name
1477 errors
= self
.checkRouterCores(reportOnce
=True)
1478 if self
.checkRouterVersion("<", minErrorVersion
):
1479 # ignore errors in old versions
1481 if assertOnError
and (errors
is not None) and len(errors
) > 0:
1482 assert "Errors found - details follow:" == 0, errors
1485 def removeIPs(self
):
1486 for interface
in self
.intfNames():
1488 self
.intf_ip_cmd(interface
, "ip address flush " + interface
)
1489 except Exception as ex
:
1490 logger
.error("%s can't remove IPs %s", self
, str(ex
))
1492 # assert False, "can't remove IPs %s" % str(ex)
1494 def checkCapability(self
, daemon
, param
):
1495 if param
is not None:
1496 daemon_path
= os
.path
.join(self
.daemondir
, daemon
)
1497 daemon_search_option
= param
.replace("-", "")
1499 "{0} -h | grep {1}".format(daemon_path
, daemon_search_option
)
1501 if daemon_search_option
not in output
:
1505 def loadConf(self
, daemon
, source
=None, param
=None):
1506 """Enabled and set config for a daemon.
1508 Arranges for loading of daemon configuration from the specified source. Possible
1509 `source` values are `None` for an empty config file, a path name which is used
1510 directly, or a file name with no path components which is first looked for
1511 directly and then looked for under a sub-directory named after router.
1514 # Unfortunately this API allows for source to not exist for any and all routers.
1516 head
, tail
= os
.path
.split(source
)
1517 if not head
and not self
.path_exists(tail
):
1518 script_dir
= os
.environ
["PYTEST_TOPOTEST_SCRIPTDIR"]
1519 router_relative
= os
.path
.join(script_dir
, self
.name
, tail
)
1520 if self
.path_exists(router_relative
):
1521 source
= router_relative
1523 "using router relative configuration: {}".format(source
)
1526 # print "Daemons before:", self.daemons
1527 if daemon
in self
.daemons
.keys() or daemon
== "frr":
1529 self
.unified_config
= 1
1531 self
.daemons
[daemon
] = 1
1532 if param
is not None:
1533 self
.daemons_options
[daemon
] = param
1534 conf_file
= "/etc/{}/{}.conf".format(self
.routertype
, daemon
)
1535 if source
is None or not os
.path
.exists(source
):
1536 if daemon
== "frr" or not self
.unified_config
:
1537 self
.cmd_raises("rm -f " + conf_file
)
1538 self
.cmd_raises("touch " + conf_file
)
1540 self
.cmd_raises("cp {} {}".format(source
, conf_file
))
1542 if not self
.unified_config
or daemon
== "frr":
1543 self
.cmd_raises("chown {0}:{0} {1}".format(self
.routertype
, conf_file
))
1544 self
.cmd_raises("chmod 664 {}".format(conf_file
))
1546 if (daemon
== "snmpd") and (self
.routertype
== "frr"):
1547 # /etc/snmp is private mount now
1548 self
.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
1549 self
.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')
1551 if (daemon
== "zebra") and (self
.daemons
["staticd"] == 0):
1552 # Add staticd with zebra - if it exists
1554 staticd_path
= os
.path
.join(self
.daemondir
, "staticd")
1558 if os
.path
.isfile(staticd_path
):
1559 self
.daemons
["staticd"] = 1
1560 self
.daemons_options
["staticd"] = ""
1561 # Auto-Started staticd has no config, so it will read from zebra config
1563 logger
.info("No daemon {} known".format(daemon
))
1564 # print "Daemons after:", self.daemons
def runInWindow(self, cmd, title=None):
    """Run `cmd` in a new terminal window, delegating to run_in_window.

    `title` is an optional window title; the underlying helper's return
    value is passed straight back to the caller.
    """
    window_result = self.run_in_window(cmd, title)
    return window_result
1569 def startRouter(self
, tgen
=None):
1570 if self
.unified_config
:
1572 'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1576 # Disable integrated-vtysh-config
1578 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
1583 "chown %s:%svty /etc/%s/vtysh.conf"
1584 % (self
.routertype
, self
.routertype
, self
.routertype
)
1586 # TODO remove the following lines after all tests are migrated to Topogen.
1587 # Try to find relevant old logfiles in /tmp and delete them
1588 map(os
.remove
, glob
.glob("{}/{}/*.log".format(self
.logdir
, self
.name
)))
1589 # Remove old core files
1590 map(os
.remove
, glob
.glob("{}/{}/*.dmp".format(self
.logdir
, self
.name
)))
1591 # Remove IP addresses from OS first - we have them in zebra.conf
1593 # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
1594 # No error - but return message and skip all the tests
1595 if self
.daemons
["ldpd"] == 1:
1596 ldpd_path
= os
.path
.join(self
.daemondir
, "ldpd")
1597 if not os
.path
.isfile(ldpd_path
):
1598 logger
.info("LDP Test, but no ldpd compiled or installed")
1599 return "LDP Test, but no ldpd compiled or installed"
1601 if version_cmp(platform
.release(), "4.5") < 0:
1602 logger
.info("LDP Test need Linux Kernel 4.5 minimum")
1603 return "LDP Test need Linux Kernel 4.5 minimum"
1604 # Check if have mpls
1606 self
.hasmpls
= tgen
.hasmpls
1607 if self
.hasmpls
!= True:
1609 "LDP/MPLS Tests will be skipped, platform missing module(s)"
1612 # Test for MPLS Kernel modules available
1613 self
.hasmpls
= False
1614 if not module_present("mpls-router"):
1616 "MPLS tests will not run (missing mpls-router kernel module)"
1618 elif not module_present("mpls-iptunnel"):
1620 "MPLS tests will not run (missing mpls-iptunnel kernel module)"
1624 if self
.hasmpls
!= True:
1625 return "LDP/MPLS Tests need mpls kernel modules"
1627 # Really want to use sysctl_atleast here, but only when MPLS is actually being
1629 self
.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
1631 shell_routers
= g_extra_config
["shell"]
1632 if "all" in shell_routers
or self
.name
in shell_routers
:
1633 self
.run_in_window(os
.getenv("SHELL", "bash"), title
="sh-%s" % self
.name
)
1635 if self
.daemons
["eigrpd"] == 1:
1636 eigrpd_path
= os
.path
.join(self
.daemondir
, "eigrpd")
1637 if not os
.path
.isfile(eigrpd_path
):
1638 logger
.info("EIGRP Test, but no eigrpd compiled or installed")
1639 return "EIGRP Test, but no eigrpd compiled or installed"
1641 if self
.daemons
["bfdd"] == 1:
1642 bfdd_path
= os
.path
.join(self
.daemondir
, "bfdd")
1643 if not os
.path
.isfile(bfdd_path
):
1644 logger
.info("BFD Test, but no bfdd compiled or installed")
1645 return "BFD Test, but no bfdd compiled or installed"
1647 status
= self
.startRouterDaemons(tgen
=tgen
)
1649 vtysh_routers
= g_extra_config
["vtysh"]
1650 if "all" in vtysh_routers
or self
.name
in vtysh_routers
:
1651 self
.run_in_window("vtysh", title
="vt-%s" % self
.name
)
1653 if self
.unified_config
:
1654 self
.cmd("vtysh -f /etc/frr/frr.conf")
def getStdErr(self, daemon):
    """Return the contents of the given daemon's stderr log via getLog."""
    stream = "err"
    return self.getLog(stream, daemon)
def getStdOut(self, daemon):
    """Return the contents of the given daemon's stdout log via getLog."""
    stream = "out"
    return self.getLog(stream, daemon)
def getLog(self, log, daemon):
    """Return the text of a per-daemon log file.

    `log` selects the suffix (e.g. "out" or "err") and `daemon` names the
    process; the file lives under `<logdir>/<router-name>/`.  The file is
    read with `cat` inside the router's namespace via self.cmd.
    """
    logfile = "{}/{}/{}.{}".format(self.logdir, self.name, daemon, log)
    return self.cmd("cat " + logfile)
1667 def startRouterDaemons(self
, daemons
=None, tgen
=None):
1668 "Starts FRR daemons for this router."
1670 asan_abort
= g_extra_config
["asan_abort"]
1671 gdb_breakpoints
= g_extra_config
["gdb_breakpoints"]
1672 gdb_daemons
= g_extra_config
["gdb_daemons"]
1673 gdb_routers
= g_extra_config
["gdb_routers"]
1674 valgrind_extra
= g_extra_config
["valgrind_extra"]
1675 valgrind_memleaks
= g_extra_config
["valgrind_memleaks"]
1676 strace_daemons
= g_extra_config
["strace_daemons"]
1678 # Get global bundle data
1679 if not self
.path_exists("/etc/frr/support_bundle_commands.conf"):
1680 # Copy global value if was covered by namespace mount
1682 if os
.path
.exists("/etc/frr/support_bundle_commands.conf"):
1683 with
open("/etc/frr/support_bundle_commands.conf", "r") as rf
:
1684 bundle_data
= rf
.read()
1686 "cat > /etc/frr/support_bundle_commands.conf",
1690 # Starts actual daemons without init (ie restart)
1691 # cd to per node directory
1692 self
.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self
.logdir
, self
.name
))
1693 self
.set_cwd("{}/{}".format(self
.logdir
, self
.name
))
1694 self
.cmd("umask 000")
1696 # Re-enable to allow for report per run
1697 self
.reportCores
= True
1699 # XXX: glue code forward ported from removed function.
1700 if self
.version
== None:
1701 self
.version
= self
.cmd(
1702 os
.path
.join(self
.daemondir
, "bgpd") + " -v"
1704 logger
.info("{}: running version: {}".format(self
.name
, self
.version
))
1705 # If `daemons` was specified then some upper API called us with
1706 # specific daemons, otherwise just use our own configuration.
1708 if daemons
is not None:
1709 daemons_list
= daemons
1711 # Append all daemons configured.
1712 for daemon
in self
.daemons
:
1713 if self
.daemons
[daemon
] == 1:
1714 daemons_list
.append(daemon
)
1716 def start_daemon(daemon
, extra_opts
=None):
1717 daemon_opts
= self
.daemons_options
.get(daemon
, "")
1718 rediropt
= " > {0}.out 2> {0}.err".format(daemon
)
1719 if daemon
== "snmpd":
1720 binary
= "/usr/sbin/snmpd"
1722 cmdopt
= "{} -C -c /etc/frr/snmpd.conf -p ".format(
1724 ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self
.routertype
)
1726 binary
= os
.path
.join(self
.daemondir
, daemon
)
1728 cmdenv
= "ASAN_OPTIONS="
1730 cmdenv
= "abort_on_error=1:"
1731 cmdenv
+= "log_path={0}/{1}.{2}.asan ".format(
1732 self
.logdir
, self
.name
, daemon
1735 if valgrind_memleaks
:
1736 this_dir
= os
.path
.dirname(
1737 os
.path
.abspath(os
.path
.realpath(__file__
))
1739 supp_file
= os
.path
.abspath(
1740 os
.path
.join(this_dir
, "../../../tools/valgrind.supp")
1742 cmdenv
+= " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
1743 daemon
, self
.logdir
, self
.name
, supp_file
1747 " --gen-suppressions=all --expensive-definedness-checks=yes"
1749 elif daemon
in strace_daemons
or "all" in strace_daemons
:
1750 cmdenv
= "strace -f -D -o {1}/{2}.strace.{0} ".format(
1751 daemon
, self
.logdir
, self
.name
1754 cmdopt
= "{} --command-log-always --log file:{}.log --log-level debug".format(
1758 cmdopt
+= " " + extra_opts
1761 (gdb_routers
or gdb_daemons
)
1763 not gdb_routers
or self
.name
in gdb_routers
or "all" in gdb_routers
1765 and (not gdb_daemons
or daemon
in gdb_daemons
or "all" in gdb_daemons
)
1767 if daemon
== "snmpd":
1771 gdbcmd
= "sudo -E gdb " + binary
1773 gdbcmd
+= " -ex 'set breakpoint pending on'"
1774 for bp
in gdb_breakpoints
:
1775 gdbcmd
+= " -ex 'b {}'".format(bp
)
1776 gdbcmd
+= " -ex 'run {}'".format(cmdopt
)
1778 self
.run_in_window(gdbcmd
, daemon
)
1781 "%s: %s %s launched in gdb window", self
, self
.routertype
, daemon
1784 if daemon
!= "snmpd":
1789 self
.cmd_raises(" ".join([cmdenv
, binary
, cmdopt
]), warn
=False)
1790 except subprocess
.CalledProcessError
as error
:
1792 '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
1797 '\n:stdout: "{}"'.format(error
.stdout
.strip())
1800 '\n:stderr: "{}"'.format(error
.stderr
.strip())
1805 logger
.info("%s: %s %s started", self
, self
.routertype
, daemon
)
1808 if "zebra" in daemons_list
:
1809 start_daemon("zebra", "-s 90000000")
1810 while "zebra" in daemons_list
:
1811 daemons_list
.remove("zebra")
1813 # Start staticd next if required
1814 if "staticd" in daemons_list
:
1815 start_daemon("staticd")
1816 while "staticd" in daemons_list
:
1817 daemons_list
.remove("staticd")
1819 if "snmpd" in daemons_list
:
1820 # Give zebra a chance to configure interface addresses that snmpd daemon
1824 start_daemon("snmpd")
1825 while "snmpd" in daemons_list
:
1826 daemons_list
.remove("snmpd")
1829 # Fix Link-Local Addresses on initial startup
1830 # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
1831 _
, output
, _
= self
.cmd_status(
1832 "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
1833 stderr
=subprocess
.STDOUT
,
1835 logger
.debug("Set MACs:\n%s", output
)
1837 # Now start all the other daemons
1838 for daemon
in daemons_list
:
1839 if self
.daemons
[daemon
] == 0:
1841 start_daemon(daemon
)
1843 # Check if daemons are running.
1844 rundaemons
= self
.cmd("ls -1 /var/run/%s/*.pid" % self
.routertype
)
1845 if re
.search(r
"No such file or directory", rundaemons
):
1846 return "Daemons are not running"
1848 # Update the permissions on the log files
1849 self
.cmd("chown frr:frr -R {}/{}".format(self
.logdir
, self
.name
))
1850 self
.cmd("chmod ug+rwX,o+r -R {}/{}".format(self
.logdir
, self
.name
))
1854 def killRouterDaemons(
1855 self
, daemons
, wait
=True, assertOnError
=True, minErrorVersion
="5.1"
1858 # Daemons(user specified daemon only) using SIGKILL
1859 rundaemons
= self
.cmd("ls -1 /var/run/%s/*.pid" % self
.routertype
)
1861 daemonsNotRunning
= []
1862 if re
.search(r
"No such file or directory", rundaemons
):
1864 for daemon
in daemons
:
1865 if rundaemons
is not None and daemon
in rundaemons
:
1867 dmns
= rundaemons
.split("\n")
1868 # Exclude empty string at end of list
1870 if re
.search(r
"%s" % daemon
, d
):
1871 daemonpid
= self
.cmd("cat %s" % d
.rstrip()).rstrip()
1872 if daemonpid
.isdigit() and pid_exists(int(daemonpid
)):
1874 "{}: killing {}".format(
1876 os
.path
.basename(d
.rstrip().rsplit(".", 1)[0]),
1879 self
.cmd("kill -9 %s" % daemonpid
)
1880 if pid_exists(int(daemonpid
)):
1882 while wait
and numRunning
> 0:
1885 "{}: waiting for {} daemon to be stopped".format(
1890 # 2nd round of kill if daemons didn't exit
1892 if re
.search(r
"%s" % daemon
, d
):
1893 daemonpid
= self
.cmd("cat %s" % d
.rstrip()).rstrip()
1894 if daemonpid
.isdigit() and pid_exists(
1898 "{}: killing {}".format(
1901 d
.rstrip().rsplit(".", 1)[0]
1905 self
.cmd("kill -9 %s" % daemonpid
)
1906 if daemonpid
.isdigit() and not pid_exists(
1910 self
.cmd("rm -- {}".format(d
.rstrip()))
1912 errors
= self
.checkRouterCores(reportOnce
=True)
1913 if self
.checkRouterVersion("<", minErrorVersion
):
1914 # ignore errors in old versions
1916 if assertOnError
and len(errors
) > 0:
1917 assert "Errors found - details follow:" == 0, errors
1919 daemonsNotRunning
.append(daemon
)
1920 if len(daemonsNotRunning
) > 0:
1921 errors
= errors
+ "Daemons are not running", daemonsNotRunning
1925 def checkRouterCores(self
, reportLeaks
=True, reportOnce
=False):
1926 if reportOnce
and not self
.reportCores
:
1930 for daemon
in self
.daemons
:
1931 if self
.daemons
[daemon
] == 1:
1932 # Look for core file
1933 corefiles
= glob
.glob(
1934 "{}/{}/{}_core*.dmp".format(self
.logdir
, self
.name
, daemon
)
1936 if len(corefiles
) > 0:
1937 backtrace
= gdb_core(self
, daemon
, corefiles
)
1940 + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
1941 % (self
.name
, daemon
, backtrace
)
1945 log
= self
.getStdErr(daemon
)
1946 if "memstats" in log
:
1948 "%s: %s has memory leaks:\n" % (self
.name
, daemon
)
1950 traces
= traces
+ "\n%s: %s has memory leaks:\n" % (
1954 log
= re
.sub("core_handler: ", "", log
)
1956 r
"(showing active allocations in memory group [a-zA-Z0-9]+)",
1960 log
= re
.sub("memstats: ", " ", log
)
1961 sys
.stderr
.write(log
)
1963 # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
1964 if checkAddressSanitizerError(
1965 self
.getStdErr(daemon
), self
.name
, daemon
, self
.logdir
1968 "%s: Daemon %s killed by AddressSanitizer" % (self
.name
, daemon
)
1970 traces
= traces
+ "\n%s: Daemon %s killed by AddressSanitizer" % (
1976 self
.reportCores
= False
1979 def checkRouterRunning(self
):
1980 "Check if router daemons are running and collect crashinfo they don't run"
1984 daemonsRunning
= self
.cmd(
1985 'vtysh -c "show logging" | grep "Logging configuration for"'
1987 # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
1988 if checkAddressSanitizerError(daemonsRunning
, self
.name
, "vtysh"):
1989 return "%s: vtysh killed by AddressSanitizer" % (self
.name
)
1991 for daemon
in self
.daemons
:
1992 if daemon
== "snmpd":
1994 if (self
.daemons
[daemon
] == 1) and not (daemon
in daemonsRunning
):
1995 sys
.stderr
.write("%s: Daemon %s not running\n" % (self
.name
, daemon
))
1996 if daemon
== "staticd":
1998 "You may have a copy of staticd installed but are attempting to test against\n"
2001 "a version of FRR that does not have staticd, please cleanup the install dir\n"
2004 # Look for core file
2005 corefiles
= glob
.glob(
2006 "{}/{}/{}_core*.dmp".format(self
.logdir
, self
.name
, daemon
)
2008 if len(corefiles
) > 0:
2009 gdb_core(self
, daemon
, corefiles
)
2011 # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
2013 "{}/{}/{}.log".format(self
.logdir
, self
.name
, daemon
)
2015 log_tail
= subprocess
.check_output(
2017 "tail -n20 {}/{}/{}.log 2> /dev/null".format(
2018 self
.logdir
, self
.name
, daemon
2024 "\nFrom %s %s %s log file:\n"
2025 % (self
.routertype
, self
.name
, daemon
)
2027 sys
.stderr
.write("%s\n" % log_tail
)
2029 # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
2030 if checkAddressSanitizerError(
2031 self
.getStdErr(daemon
), self
.name
, daemon
, self
.logdir
2033 return "%s: Daemon %s not running - killed by AddressSanitizer" % (
2038 return "%s: Daemon %s not running" % (self
.name
, daemon
)
2041 def checkRouterVersion(self
, cmpop
, version
):
2043 Compares router version using operation `cmpop` with `version`.
2044 Valid `cmpop` values:
2045 * `>=`: has the same version or greater
2046 * '>': has greater version
2047 * '=': has the same version
2048 * '<': has a lesser version
2049 * '<=': has the same version or lesser
2051 Usage example: router.checkRouterVersion('>', '1.0')
2054 # Make sure we have version information first
2055 if self
.version
== None:
2056 self
.version
= self
.cmd(
2057 os
.path
.join(self
.daemondir
, "bgpd") + " -v"
2059 logger
.info("{}: running version: {}".format(self
.name
, self
.version
))
2061 rversion
= self
.version
2062 if rversion
== None:
2065 result
= version_cmp(rversion
, version
)
2079 def get_ipv6_linklocal(self
):
2080 "Get LinkLocal Addresses from interfaces"
2084 ifaces
= self
.cmd("ip -6 address")
2085 # Fix newlines (make them all the same)
2086 ifaces
= ("\n".join(ifaces
.splitlines()) + "\n").splitlines()
2090 m
= re
.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line
)
2092 interface
= m
.group(1)
2095 "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
2100 ll_per_if_count
+= 1
2101 if ll_per_if_count
> 1:
2102 linklocal
+= [["%s-%s" % (interface
, ll_per_if_count
), local
]]
2104 linklocal
+= [[interface
, local
]]
2107 def daemon_available(self
, daemon
):
2108 "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
2110 daemon_path
= os
.path
.join(self
.daemondir
, daemon
)
2111 if not os
.path
.isfile(daemon_path
):
2113 if daemon
== "ldpd":
2114 if version_cmp(platform
.release(), "4.5") < 0:
2116 if not module_present("mpls-router", load
=False):
2118 if not module_present("mpls-iptunnel", load
=False):
def get_routertype(self):
    """Return the type of Router (frr)."""
    routertype = self.routertype
    return routertype
2127 def report_memory_leaks(self
, filename_prefix
, testscript
):
2128 "Report Memory Leaks to file prefixed with given string"
2131 filename
= filename_prefix
+ re
.sub(r
"\.py", "", testscript
) + ".txt"
2132 for daemon
in self
.daemons
:
2133 if self
.daemons
[daemon
] == 1:
2134 log
= self
.getStdErr(daemon
)
2135 if "memstats" in log
:
2138 "\nRouter {} {} StdErr Log:\n{}".format(self
.name
, daemon
, log
)
2142 # Check if file already exists
2143 fileexists
= os
.path
.isfile(filename
)
2144 leakfile
= open(filename
, "a")
2146 # New file - add header
2148 "# Memory Leak Detection for topotest %s\n\n"
2151 leakfile
.write("## Router %s\n" % self
.name
)
2152 leakfile
.write("### Process %s\n" % daemon
)
2153 log
= re
.sub("core_handler: ", "", log
)
2155 r
"(showing active allocations in memory group [a-zA-Z0-9]+)",
2159 log
= re
.sub("memstats: ", " ", log
)
2161 leakfile
.write("\n")
2167 """Convert string to unicode, depending on python version"""
2168 if sys
.version_info
[0] > 2:
2171 return unicode(s
) # pylint: disable=E0602
2175 return isinstance(o
, Mapping
)