2 # SPDX-License-Identifier: ISC
6 # Library of helper functions for NetDEF Topology Tests
8 # Copyright (c) 2016 by
9 # Network Device Education Foundation, Inc. ("NetDEF")
27 from collections
.abc
import Mapping
28 from copy
import deepcopy
30 import lib
.topolog
as topolog
31 from lib
.micronet_compat
import Node
32 from lib
.topolog
import logger
33 from munet
.base
import Timeout
35 from lib
import micronet
37 g_pytest_config
= None
def get_logs_path(rundir):
    """Return the log directory path for the currently running test.

    Combines the caller-supplied run directory with the per-test log
    subdirectory reported by topolog.get_test_logdir().
    """
    return os.path.join(rundir, topolog.get_test_logdir())
45 def gdb_core(obj
, daemon
, corefiles
):
61 gdbcmds
= [["-ex", i
.strip()] for i
in gdbcmds
.strip().split("\n")]
62 gdbcmds
= [item
for sl
in gdbcmds
for item
in sl
]
64 daemon_path
= os
.path
.join(obj
.daemondir
, daemon
)
65 backtrace
= subprocess
.check_output(
66 ["gdb", daemon_path
, corefiles
[0], "--batch"] + gdbcmds
69 "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj
.name
, daemon
)
71 sys
.stderr
.write("%s" % backtrace
)
75 class json_cmp_result(object):
76 "json_cmp result class for better assertion messages"
81 def add_error(self
, error
):
82 "Append error message to the result"
83 for line
in error
.splitlines():
84 self
.errors
.append(line
)
87 "Returns True if there were errors, otherwise False."
88 return len(self
.errors
) > 0
91 headline
= ["Generated JSON diff error report:", ""]
92 return headline
+ self
.errors
96 "Generated JSON diff error report:\n\n\n" + "\n".join(self
.errors
) + "\n\n"
100 def gen_json_diff_report(d1
, d2
, exact
=False, path
="> $", acc
=(0, "")):
102 Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye.
106 if isinstance(v
, (dict, list)):
107 return "\t" + "\t".join(
108 json
.dumps(v
, indent
=4, separators
=(",", ": ")).splitlines(True)
111 return "'{}'".format(v
)
114 if isinstance(v
, (list, tuple)):
116 elif isinstance(v
, dict):
118 elif isinstance(v
, (int, float)):
120 elif isinstance(v
, bool):
122 elif isinstance(v
, str):
127 def get_errors(other_acc
):
130 def get_errors_n(other_acc
):
133 def add_error(acc
, msg
, points
=1):
134 return (acc
[0] + points
, acc
[1] + "{}: {}\n".format(path
, msg
))
136 def merge_errors(acc
, other_acc
):
137 return (acc
[0] + other_acc
[0], acc
[1] + other_acc
[1])
140 return "{}[{}]".format(path
, idx
)
143 return "{}->{}".format(path
, key
)
145 def has_errors(other_acc
):
146 return other_acc
[0] > 0
149 not isinstance(d1
, (list, dict))
150 and not isinstance(d2
, (list, dict))
155 not isinstance(d1
, (list, dict))
156 and not isinstance(d2
, (list, dict))
161 "d1 has element with value '{}' but in d2 it has value '{}'".format(d1
, d2
),
165 and isinstance(d2
, list)
166 and ((len(d2
) > 0 and d2
[0] == "__ordered__") or exact
)
170 if len(d1
) != len(d2
):
173 "d1 has Array of length {} but in d2 it is of length {}".format(
178 for idx
, v1
, v2
in zip(range(0, len(d1
)), d1
, d2
):
180 acc
, gen_json_diff_report(v1
, v2
, exact
=exact
, path
=add_idx(idx
))
182 elif isinstance(d1
, list) and isinstance(d2
, list):
183 if len(d1
) < len(d2
):
186 "d1 has Array of length {} but in d2 it is of length {}".format(
191 for idx2
, v2
in zip(range(0, len(d2
)), d2
):
195 for idx1
, v1
in zip(range(0, len(d1
)), d1
):
196 tmp_v1
= deepcopy(v1
)
197 tmp_v2
= deepcopy(v2
)
198 tmp_diff
= gen_json_diff_report(tmp_v1
, tmp_v2
, path
=add_idx(idx1
))
199 if not has_errors(tmp_diff
):
203 elif not closest_diff
or get_errors_n(tmp_diff
) < get_errors_n(
206 closest_diff
= tmp_diff
208 if not found_match
and isinstance(v2
, (list, dict)):
209 sub_error
= "\n\n\t{}".format(
210 "\t".join(get_errors(closest_diff
).splitlines(True))
215 "d2 has the following element at index {} which is not present in d1: "
216 + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
217 ).format(idx2
, dump_json(v2
), closest_idx
, sub_error
),
219 if not found_match
and not isinstance(v2
, (list, dict)):
222 "d2 has the following element at index {} which is not present in d1: {}".format(
226 elif isinstance(d1
, dict) and isinstance(d2
, dict) and exact
:
227 invalid_keys_d1
= [k
for k
in d1
.keys() if k
not in d2
.keys()]
228 invalid_keys_d2
= [k
for k
in d2
.keys() if k
not in d1
.keys()]
229 for k
in invalid_keys_d1
:
230 acc
= add_error(acc
, "d1 has key '{}' which is not present in d2".format(k
))
231 for k
in invalid_keys_d2
:
232 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
233 valid_keys_intersection
= [k
for k
in d1
.keys() if k
in d2
.keys()]
234 for k
in valid_keys_intersection
:
236 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
238 elif isinstance(d1
, dict) and isinstance(d2
, dict):
239 none_keys
= [k
for k
, v
in d2
.items() if v
== None]
240 none_keys_present
= [k
for k
in d1
.keys() if k
in none_keys
]
241 for k
in none_keys_present
:
243 acc
, "d1 has key '{}' which is not supposed to be present".format(k
)
245 keys
= [k
for k
, v
in d2
.items() if v
!= None]
246 invalid_keys_intersection
= [k
for k
in keys
if k
not in d1
.keys()]
247 for k
in invalid_keys_intersection
:
248 acc
= add_error(acc
, "d2 has key '{}' which is not present in d1".format(k
))
249 valid_keys_intersection
= [k
for k
in keys
if k
in d1
.keys()]
250 for k
in valid_keys_intersection
:
252 acc
, gen_json_diff_report(d1
[k
], d2
[k
], exact
=exact
, path
=add_key(k
))
257 "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
258 json_type(d1
), json_type(d2
)
266 def json_cmp(d1
, d2
, exact
=False):
268 JSON compare function. Receives two parameters:
269 * `d1`: parsed JSON data structure
270 * `d2`: parsed JSON data structure
272 Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
273 in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
274 error report is generated and wrapped in a 'json_cmp_result()'. There are special
275 parameters and notations explained below which can be used to cover rather unusual
278 * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
279 order within JSON Arrays)
280 * using 'null' (or 'None' in Python) as JSON Object value is checking for key
282 * using '*' as JSON Object value or Array value is checking for presence in d1
283 without checking the values
284 * using '__ordered__' as first element in a JSON Array in d2 will also check the
285 order when it is compared to an Array in d1
288 (errors_n
, errors
) = gen_json_diff_report(deepcopy(d1
), deepcopy(d2
), exact
=exact
)
291 result
= json_cmp_result()
292 result
.add_error(errors
)
def router_output_cmp(router, cmd, expected):
    """Run `cmd` on `router` via vtysh and diff the output against `expected`.

    Both sides are passed through normalize_text() before comparison.
    Returns an empty string when they match, otherwise a formatted diff
    (see get_textdiff).
    """
    current = normalize_text(router.vtysh_cmd(cmd))
    wanted = normalize_text(expected)
    return get_textdiff(
        current,
        wanted,
        title1="Current output",
        title2="Expected output",
    )
def router_json_cmp(router, cmd, data, exact=False):
    """Run a JSON-producing vtysh command and compare it with `data`.

    `cmd` is executed on `router` with JSON parsing enabled (`isjson=True`)
    and the parsed structure is handed to json_cmp() along with `data` and
    the `exact` flag. Returns json_cmp()'s result (None on a match,
    otherwise an error report).
    """
    parsed = router.vtysh_cmd(cmd, isjson=True)
    return json_cmp(parsed, data, exact)
318 def run_and_expect(func
, what
, count
=20, wait
=3):
320 Run `func` and compare the result with `what`. Do it for `count` times
321 waiting `wait` seconds between tries. By default it tries 20 times with
322 3 seconds delay between tries.
324 Returns (True, func-return) on success or
325 (False, func-return) on failure.
329 Helper functions to use with this function:
333 start_time
= time
.time()
334 func_name
= "<unknown>"
335 if func
.__class
__ == functools
.partial
:
336 func_name
= func
.func
.__name
__
338 func_name
= func
.__name
__
340 # Just a safety-check to avoid running topotests with very
341 # small wait/count arguments.
342 wait_time
= wait
* count
346 ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
351 "'{}' polling started (interval {} secs, maximum {} tries)".format(
352 func_name
, wait
, count
363 end_time
= time
.time()
365 "'{}' succeeded after {:.2f} seconds".format(
366 func_name
, end_time
- start_time
369 return (True, result
)
371 end_time
= time
.time()
373 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
375 return (False, result
)
378 def run_and_expect_type(func
, etype
, count
=20, wait
=3, avalue
=None):
380 Run `func` and compare the result with `etype`. Do it for `count` times
381 waiting `wait` seconds between tries. By default it tries 20 times with
382 3 seconds delay between tries.
384 This function is used when you want to test the return type and,
385 optionally, the return value.
387 Returns (True, func-return) on success or
388 (False, func-return) on failure.
390 start_time
= time
.time()
391 func_name
= "<unknown>"
392 if func
.__class
__ == functools
.partial
:
393 func_name
= func
.func
.__name
__
395 func_name
= func
.__name
__
397 # Just a safety-check to avoid running topotests with very
398 # small wait/count arguments.
399 wait_time
= wait
* count
403 ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
408 "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
409 func_name
, wait
, int(wait
* count
)
415 if not isinstance(result
, etype
):
417 "Expected result type '{}' got '{}' instead".format(etype
, type(result
))
423 if etype
!= type(None) and avalue
!= None and result
!= avalue
:
424 logger
.debug("Expected value '{}' got '{}' instead".format(avalue
, result
))
429 end_time
= time
.time()
431 "'{}' succeeded after {:.2f} seconds".format(
432 func_name
, end_time
- start_time
435 return (True, result
)
437 end_time
= time
.time()
439 "'{}' failed after {:.2f} seconds".format(func_name
, end_time
- start_time
)
441 return (False, result
)
444 def router_json_cmp_retry(router
, cmd
, data
, exact
=False, retry_timeout
=10.0):
446 Runs `cmd` that returns JSON data (normally the command ends with 'json')
447 and compare with `data` contents. Retry by default for 10 seconds
451 return router_json_cmp(router
, cmd
, data
, exact
)
453 ok
, _
= run_and_expect(test_func
, None, int(retry_timeout
), 1)
458 "Converting Integer to DPID"
462 dpid
= "0" * (16 - len(dpid
)) + dpid
466 "Unable to derive default datapath ID - "
467 "please either specify a dpid or use a "
468 "canonical switch name such as s23."
472 def get_textdiff(text1
, text2
, title1
="", title2
="", **opts
):
473 "Returns empty string if same or formatted diff"
476 difflib
.unified_diff(text1
, text2
, fromfile
=title1
, tofile
=title2
, **opts
)
478 # Clean up line endings
479 diff
= os
.linesep
.join([s
for s
in diff
.splitlines() if s
])
def difflines(text1, text2, title1="", title2="", **opts):
    """Wrapper for get_textdiff that handles the string transformations.

    Each text blob is stripped of trailing whitespace, guaranteed a final
    newline, and split into lines with line endings kept (the form
    difflib-based helpers expect) before delegating to get_textdiff().
    """

    def _as_lines(blob):
        # Normalize trailing whitespace and re-split keeping line endings.
        return ("\n".join(blob.rstrip().splitlines()) + "\n").splitlines(True)

    return get_textdiff(_as_lines(text1), _as_lines(text2), title1, title2, **opts)
490 def get_file(content
):
492 Generates a temporary file in '/tmp' with `content` and returns the file name.
494 if isinstance(content
, list) or isinstance(content
, tuple):
495 content
= "\n".join(content
)
496 fde
= tempfile
.NamedTemporaryFile(mode
="w", delete
=False)
503 def normalize_text(text
):
505 Strips formating spaces/tabs, carriage returns and trailing whitespace.
507 text
= re
.sub(r
"[ \t]+", " ", text
)
508 text
= re
.sub(r
"\r", "", text
)
510 # Remove whitespace in the middle of text.
511 text
= re
.sub(r
"[ \t]+\n", "\n", text
)
512 # Remove whitespace at the end of the text.
520 Parses unix name output to check if running on GNU/Linux.
522 Returns True if running on Linux, returns False otherwise.
525 if os
.uname()[0] == "Linux":
530 def iproute2_is_vrf_capable():
532 Checks if the iproute2 version installed on the system is capable of
533 handling VRFs by interpreting the output of the 'ip' utility found in PATH.
535 Returns True if capability can be detected, returns False otherwise.
540 subp
= subprocess
.Popen(
541 ["ip", "route", "show", "vrf"],
542 stdout
=subprocess
.PIPE
,
543 stderr
=subprocess
.PIPE
,
544 stdin
=subprocess
.PIPE
,
546 iproute2_err
= subp
.communicate()[1].splitlines()[0].split()[0]
548 if iproute2_err
!= "Error:":
555 def iproute2_is_fdb_get_capable():
557 Checks if the iproute2 version installed on the system is capable of
558 handling `bridge fdb get` commands to query neigh table resolution.
560 Returns True if capability can be detected, returns False otherwise.
565 subp
= subprocess
.Popen(
566 ["bridge", "fdb", "get", "help"],
567 stdout
=subprocess
.PIPE
,
568 stderr
=subprocess
.PIPE
,
569 stdin
=subprocess
.PIPE
,
571 iproute2_out
= subp
.communicate()[1].splitlines()[0].split()[0]
573 if "Usage" in str(iproute2_out
):
580 def module_present_linux(module
, load
):
582 Returns whether `module` is present.
584 If `load` is true, it will try to load it via modprobe.
586 with
open("/proc/modules", "r") as modules_file
:
587 if module
.replace("-", "_") in modules_file
.read():
589 cmd
= "/sbin/modprobe {}{}".format("" if load
else "-n ", module
)
590 if os
.system(cmd
) != 0:
596 def module_present_freebsd(module
, load
):
def module_present(module, load=True):
    """Check (and optionally load) a kernel module on the current platform.

    Dispatches to the Linux or FreeBSD implementation based on
    sys.platform; implicitly returns None on any other platform.
    """
    plat = sys.platform
    if plat.startswith("linux"):
        return module_present_linux(module, load)
    if plat.startswith("freebsd"):
        return module_present_freebsd(module, load)
607 def version_cmp(v1
, v2
):
609 Compare two version strings and returns:
611 * `-1`: if `v1` is less than `v2`
612 * `0`: if `v1` is equal to `v2`
613 * `1`: if `v1` is greater than `v2`
615 Raises `ValueError` if versions are not well formated.
617 vregex
= r
"(?P<whole>\d+(\.(\d+))*)"
618 v1m
= re
.match(vregex
, v1
)
619 v2m
= re
.match(vregex
, v2
)
620 if v1m
is None or v2m
is None:
621 raise ValueError("got a invalid version string")
624 v1g
= v1m
.group("whole").split(".")
625 v2g
= v2m
.group("whole").split(".")
627 # Get the longest version string
632 # Reverse list because we are going to pop the tail
635 for _
in range(vnum
):
663 def interface_set_status(node
, ifacename
, ifaceaction
=False, vrf_name
=None):
665 str_ifaceaction
= "no shutdown"
667 str_ifaceaction
= "shutdown"
669 cmd
= 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
670 ifacename
, str_ifaceaction
674 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
675 ifacename
, vrf_name
, str_ifaceaction
681 def ip4_route_zebra(node
, vrf_name
=None):
683 Gets an output of 'show ip route' command. It can be used
684 with comparing the output to a reference
687 tmp
= node
.vtysh_cmd("show ip route")
689 tmp
= node
.vtysh_cmd("show ip route vrf {0}".format(vrf_name
))
690 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
692 lines
= output
.splitlines()
694 while lines
and (not lines
[0].strip() or not header_found
):
695 if "o - offload failure" in lines
[0]:
698 return "\n".join(lines
)
701 def ip6_route_zebra(node
, vrf_name
=None):
703 Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
704 canonicalizes it by eliding link-locals.
708 tmp
= node
.vtysh_cmd("show ipv6 route")
710 tmp
= node
.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name
))
713 output
= re
.sub(r
" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp
)
715 # Mask out the link-local addresses
716 output
= re
.sub(r
"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output
)
718 lines
= output
.splitlines()
720 while lines
and (not lines
[0].strip() or not header_found
):
721 if "o - offload failure" in lines
[0]:
725 return "\n".join(lines
)
728 def proto_name_to_number(protocol
):
744 ) # default return same as input
749 Gets a structured return of the command 'ip route'. It can be used in
750 conjunction with json_cmp() to provide accurate assert explanations.
765 output
= normalize_text(node
.run("ip route")).splitlines()
768 columns
= line
.split(" ")
769 route
= result
[columns
[0]] = {}
771 for column
in columns
:
773 route
["dev"] = column
775 route
["via"] = column
777 # translate protocol names back to numbers
778 route
["proto"] = proto_name_to_number(column
)
780 route
["metric"] = column
782 route
["scope"] = column
788 def ip4_vrf_route(node
):
790 Gets a structured return of the command 'ip route show vrf {0}-cust1'.
791 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
806 output
= normalize_text(
807 node
.run("ip route show vrf {0}-cust1".format(node
.name
))
812 columns
= line
.split(" ")
813 route
= result
[columns
[0]] = {}
815 for column
in columns
:
817 route
["dev"] = column
819 route
["via"] = column
821 # translate protocol names back to numbers
822 route
["proto"] = proto_name_to_number(column
)
824 route
["metric"] = column
826 route
["scope"] = column
834 Gets a structured return of the command 'ip -6 route'. It can be used in
835 conjunction with json_cmp() to provide accurate assert explanations.
849 output
= normalize_text(node
.run("ip -6 route")).splitlines()
852 columns
= line
.split(" ")
853 route
= result
[columns
[0]] = {}
855 for column
in columns
:
857 route
["dev"] = column
859 route
["via"] = column
861 # translate protocol names back to numbers
862 route
["proto"] = proto_name_to_number(column
)
864 route
["metric"] = column
866 route
["pref"] = column
872 def ip6_vrf_route(node
):
874 Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
875 It can be used in conjunction with json_cmp() to provide accurate assert explanations.
889 output
= normalize_text(
890 node
.run("ip -6 route show vrf {0}-cust1".format(node
.name
))
894 columns
= line
.split(" ")
895 route
= result
[columns
[0]] = {}
897 for column
in columns
:
899 route
["dev"] = column
901 route
["via"] = column
903 # translate protocol names back to numbers
904 route
["proto"] = proto_name_to_number(column
)
906 route
["metric"] = column
908 route
["pref"] = column
916 Gets a structured return of the command 'ip rule'. It can be used in
917 conjunction with json_cmp() to provide accurate assert explanations.
933 "from": "1.2.0.0/16",
938 output
= normalize_text(node
.run("ip rule")).splitlines()
941 columns
= line
.split(" ")
944 # remove last character, since it is ':'
945 pref
= columns
[0][:-1]
948 for column
in columns
:
950 route
["from"] = column
954 route
["proto"] = column
956 route
["iif"] = column
958 route
["fwmark"] = column
965 def sleep(amount
, reason
=None):
967 Sleep wrapper that registers in the log the amount of sleep
970 logger
.info("Sleeping for {} seconds".format(amount
))
972 logger
.info(reason
+ " ({} seconds)".format(amount
))
977 def checkAddressSanitizerError(output
, router
, component
, logdir
=""):
978 "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
980 def processAddressSanitizerError(asanErrorRe
, output
, router
, component
):
982 "%s: %s triggered an exception by AddressSanitizer\n" % (router
, component
)
984 # Sanitizer Error found in log
985 pidMark
= asanErrorRe
.group(1)
986 addressSanitizerLog
= re
.search(
987 "%s(.*)%s" % (pidMark
, pidMark
), output
, re
.DOTALL
989 if addressSanitizerLog
:
990 # Find Calling Test. Could be multiple steps back
991 testframe
= sys
._current
_frames
().values()[0]
994 test
= os
.path
.splitext(
995 os
.path
.basename(testframe
.f_globals
["__file__"])
997 if (test
!= "topotest") and (test
!= "topogen"):
998 # Found the calling test
999 callingTest
= os
.path
.basename(testframe
.f_globals
["__file__"])
1002 testframe
= testframe
.f_back
1004 # somehow couldn't find the test script.
1005 callingTest
= "unknownTest"
1007 # Now finding Calling Procedure
1010 callingProc
= sys
._getframe
(level
).f_code
.co_name
1012 (callingProc
!= "processAddressSanitizerError")
1013 and (callingProc
!= "checkAddressSanitizerError")
1014 and (callingProc
!= "checkRouterCores")
1015 and (callingProc
!= "stopRouter")
1016 and (callingProc
!= "stop")
1017 and (callingProc
!= "stop_topology")
1018 and (callingProc
!= "checkRouterRunning")
1019 and (callingProc
!= "check_router_running")
1020 and (callingProc
!= "routers_have_failure")
1022 # Found the calling test
1026 # something wrong - couldn't found the calling test function
1027 callingProc
= "unknownProc"
1028 with
open("/tmp/AddressSanitzer.txt", "a") as addrSanFile
:
1030 "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1031 % (callingTest
, callingProc
, router
)
1034 "\n".join(addressSanitizerLog
.group(1).splitlines()) + "\n"
1036 addrSanFile
.write("## Error: %s\n\n" % asanErrorRe
.group(2))
1038 "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
1039 % (callingTest
, callingProc
, router
)
1043 + "\n ".join(addressSanitizerLog
.group(1).splitlines())
1046 addrSanFile
.write("\n---------------\n")
1049 addressSanitizerError
= re
.search(
1050 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
1052 if addressSanitizerError
:
1053 processAddressSanitizerError(addressSanitizerError
, output
, router
, component
)
1056 # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
1058 filepattern
= logdir
+ "/" + router
+ ".asan." + component
+ ".*"
1060 "Log check for %s on %s, pattern %s\n" % (component
, router
, filepattern
)
1062 for file in glob
.glob(filepattern
):
1063 with
open(file, "r") as asanErrorFile
:
1064 asanError
= asanErrorFile
.read()
1065 addressSanitizerError
= re
.search(
1066 r
"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
1068 if addressSanitizerError
:
1069 processAddressSanitizerError(
1070 addressSanitizerError
, asanError
, router
, component
1076 def _sysctl_atleast(commander
, variable
, min_value
):
1077 if isinstance(min_value
, tuple):
1078 min_value
= list(min_value
)
1079 is_list
= isinstance(min_value
, list)
1081 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1083 cur_val
= [int(x
) for x
in sval
.split()]
1089 for i
, v
in enumerate(cur_val
):
1090 if v
< min_value
[i
]:
1095 if cur_val
< min_value
:
1099 valstr
= " ".join([str(x
) for x
in min_value
])
1101 valstr
= str(min_value
)
1102 logger
.debug("Increasing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1103 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1106 def _sysctl_assure(commander
, variable
, value
):
1107 if isinstance(value
, tuple):
1109 is_list
= isinstance(value
, list)
1111 sval
= commander
.cmd_raises("sysctl -n " + variable
).strip()
1113 cur_val
= [int(x
) for x
in sval
.split()]
1119 for i
, v
in enumerate(cur_val
):
1125 if cur_val
!= str(value
):
1130 valstr
= " ".join([str(x
) for x
in value
])
1133 logger
.debug("Changing sysctl %s from %s to %s", variable
, cur_val
, valstr
)
1134 commander
.cmd_raises('sysctl -w {}="{}"\n'.format(variable
, valstr
))
1137 def sysctl_atleast(commander
, variable
, min_value
, raises
=False):
1139 if commander
is None:
1140 commander
= micronet
.Commander("topotest")
1141 return _sysctl_atleast(commander
, variable
, min_value
)
1142 except subprocess
.CalledProcessError
as error
:
1144 "%s: Failed to assure sysctl min value %s = %s",
1153 def sysctl_assure(commander
, variable
, value
, raises
=False):
1155 if commander
is None:
1156 commander
= micronet
.Commander("topotest")
1157 return _sysctl_assure(commander
, variable
, value
)
1158 except subprocess
.CalledProcessError
as error
:
1160 "%s: Failed to assure sysctl value %s = %s",
1170 def rlimit_atleast(rname
, min_value
, raises
=False):
1172 cval
= resource
.getrlimit(rname
)
1174 if soft
< min_value
:
1175 nval
= (min_value
, hard
if min_value
< hard
else min_value
)
1176 logger
.debug("Increasing rlimit %s from %s to %s", rname
, cval
, nval
)
1177 resource
.setrlimit(rname
, nval
)
1178 except subprocess
.CalledProcessError
as error
:
1180 "Failed to assure rlimit [%s] = %s", rname
, min_value
, exc_info
=True
def fix_netns_limits(ns):
    """Tune kernel sysctl settings inside namespace `ns` for topotests.

    Raises TCP socket buffer sizes, disables reverse-path filtering,
    enables IPv4/IPv6 forwarding, and adjusts ARP/IGMP/multipath behavior
    so routing daemons behave predictably inside the test namespace.
    """
    # Maximum read and write socket buffer sizes
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2**20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2**20])

    # Reverse-path filtering would drop packets in asymmetric topologies.
    sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)

    sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)

    # ARP behavior: announce best local address, notify on address change.
    sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    # skip_notify_on_dev_down only exists on kernels >= 4.20.
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
def fix_host_limits():
    """Increase system limits."""
    # Process/file-descriptor headroom for many concurrent daemons.
    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    sysctl_atleast(None, "fs.file-max", 16 * 1024)
    sysctl_atleast(None, "kernel.pty.max", 16 * 1024)

    # Core-dump naming.
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    sysctl_atleast(None, "net.core.rmem_max", 16 * 2**20)
    sysctl_atleast(None, "net.core.wmem_max", 16 * 2**20)

    # Garbage Collection Settings for ARP and Neighbors
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
    # Hold entries for 10 minutes
    sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
    sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
1271 def setup_node_tmpdir(logdir
, name
):
1272 # Cleanup old log, valgrind, and core files.
1273 subprocess
.check_call(
1274 "rm -rf {0}/{1}.valgrind.* {0}/{1}.asan.* {0}/{1}/".format(logdir
, name
),
1278 # Setup the per node directory.
1279 nodelogdir
= "{}/{}".format(logdir
, name
)
1280 subprocess
.check_call(
1281 "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir
), shell
=True
1283 logfile
= "{0}/{1}.log".format(logdir
, name
)
1288 "A Node with IPv4/IPv6 forwarding enabled"
1290 def __init__(self
, name
, *posargs
, **params
):
1291 # Backward compatibility:
1292 # Load configuration defaults like topogen.
1293 self
.config_defaults
= configparser
.ConfigParser(
1295 "verbosity": "info",
1296 "frrdir": "/usr/lib/frr",
1297 "routertype": "frr",
1302 self
.config_defaults
.read(
1303 os
.path
.join(os
.path
.dirname(os
.path
.realpath(__file__
)), "../pytest.ini")
1306 self
.perf_daemons
= {}
1308 # If this topology is using old API and doesn't have logdir
1309 # specified, then attempt to generate an unique logdir.
1310 self
.logdir
= params
.get("logdir")
1311 if self
.logdir
is None:
1312 self
.logdir
= get_logs_path(g_pytest_config
.getoption("--rundir"))
1314 if not params
.get("logger"):
1315 # If logger is present topogen has already set this up
1316 logfile
= setup_node_tmpdir(self
.logdir
, name
)
1317 l
= topolog
.get_logger(name
, log_level
="debug", target
=logfile
)
1318 params
["logger"] = l
1320 super(Router
, self
).__init
__(name
, *posargs
, **params
)
1322 self
.daemondir
= None
1323 self
.hasmpls
= False
1324 self
.routertype
= "frr"
1325 self
.unified_config
= None
1348 self
.daemons_options
= {"zebra": ""}
1349 self
.reportCores
= True
1352 self
.ns_cmd
= "sudo nsenter -a -t {} ".format(self
.pid
)
1354 # Allow escaping from running inside docker
1355 cgroup
= open("/proc/1/cgroup").read()
1356 m
= re
.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup
)
1358 self
.ns_cmd
= "docker exec -it {} ".format(m
.group(1)) + self
.ns_cmd
1362 logger
.debug("CMD to enter {}: {}".format(self
.name
, self
.ns_cmd
))
1364 def _config_frr(self
, **params
):
1365 "Configure FRR binaries"
1366 self
.daemondir
= params
.get("frrdir")
1367 if self
.daemondir
is None:
1368 self
.daemondir
= self
.config_defaults
.get("topogen", "frrdir")
1370 zebra_path
= os
.path
.join(self
.daemondir
, "zebra")
1371 if not os
.path
.isfile(zebra_path
):
1372 raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path
))
1374 mgmtd_path
= os
.path
.join(self
.daemondir
, "mgmtd")
1375 if not os
.path
.isfile(mgmtd_path
):
1376 raise Exception("FRR MGMTD binary doesn't exist at {}".format(mgmtd_path
))
1378 # pylint: disable=W0221
1379 # Some params are only meaningful for the parent class.
1380 def config_host(self
, **params
):
1381 super(Router
, self
).config_host(**params
)
1383 # User did not specify the daemons directory, try to autodetect it.
1384 self
.daemondir
= params
.get("daemondir")
1385 if self
.daemondir
is None:
1386 self
.routertype
= params
.get(
1387 "routertype", self
.config_defaults
.get("topogen", "routertype")
1389 self
._config
_frr
(**params
)
1391 # Test the provided path
1392 zpath
= os
.path
.join(self
.daemondir
, "zebra")
1393 if not os
.path
.isfile(zpath
):
1394 raise Exception("No zebra binary found in {}".format(zpath
))
1396 cpath
= os
.path
.join(self
.daemondir
, "mgmtd")
1397 if not os
.path
.isfile(zpath
):
1398 raise Exception("No MGMTD binary found in {}".format(cpath
))
1399 # Allow user to specify routertype when the path was specified.
1400 if params
.get("routertype") is not None:
1401 self
.routertype
= params
.get("routertype")
1403 # Set ownership of config files
1404 self
.cmd("chown {0}:{0}vty /etc/{0}".format(self
.routertype
))
1406 def terminate(self
):
1407 # Stop running FRR daemons
1409 super(Router
, self
).terminate()
1410 os
.system("chmod -R go+rw " + self
.logdir
)
# Return count of running daemons
def listDaemons(self):
    """Return (name, pid) pairs for this router's running daemons.

    Reads every pidfile under /var/run/<routertype>/, keeps entries whose
    process still exists (checked via /proc/<pid>) and removes pidfiles
    left behind by exited daemons.

    NOTE(review): several original lines are elided in this excerpt;
    elision points are marked below.
    """
    # [lines elided in this excerpt]
    rc, stdout, _ = self.cmd_status(
        "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
    # [lines elided in this excerpt]
    for d in stdout.strip().split("\n"):
        # [lines elided in this excerpt]
        pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
        # pidfile[:-4] strips the trailing ".pid"
        name = os.path.basename(pidfile[:-4])

        # probably not compatible with bsd.
        rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
        # [lines elided in this excerpt]
        "%s: %s exited leaving pidfile %s (%s)",
        # [lines elided in this excerpt]
        self.cmd("rm -- " + pidfile)
        # [lines elided in this excerpt]
        ret.append((name, pid))
    except (subprocess.CalledProcessError, ValueError):
        # [lines elided in this excerpt]
def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
    """Stop this router's FRR daemons.

    Sends SIGTERM to every running daemon, polls (up to 30 iterations)
    for them to exit, then SIGBUSes any stragglers (forcing a core for
    analysis) and reports cores/errors.

    assertOnError: fail the test (via assert) if error reports were found.
    minErrorVersion: error reports are ignored for FRR older than this.

    NOTE(review): several original lines are elided in this excerpt;
    elision points are marked below.
    """
    # Stop Running FRR Daemons
    running = self.listDaemons()
    # [lines elided in this excerpt]
    logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
    for name, pid in running:
        logger.debug("{}: sending SIGTERM to {}".format(self.name, name))
        # [lines elided in this excerpt]
        os.kill(pid, signal.SIGTERM)
        except OSError as err:
            # [lines elided in this excerpt]
            "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)

    running = self.listDaemons()
    # [lines elided in this excerpt]
    for _ in range(0, 30):
        # [lines elided in this excerpt]
        "{}: waiting for daemons stopping: {}".format(
            self.name, ", ".join([x[0] for x in running])
        # [lines elided in this excerpt]
        running = self.listDaemons()

    # [lines elided in this excerpt]
    "%s: sending SIGBUS to: %s",
    # [lines elided in this excerpt]
    ", ".join([x[0] for x in running]),
    # [lines elided in this excerpt]
    for name, pid in running:
        pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
        logger.info("%s: killing %s", self.name, name)
        self.cmd("kill -SIGBUS %d" % pid)
        self.cmd("rm -- " + pidfile)
    # [lines elided in this excerpt]
    "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name,
    # [lines elided in this excerpt]
    errors = self.checkRouterCores(reportOnce=True)
    if self.checkRouterVersion("<", minErrorVersion):
        # ignore errors in old versions
        # [lines elided in this excerpt]
    if assertOnError and (errors is not None) and len(errors) > 0:
        # Always-false assert: this file's idiom for surfacing the
        # accumulated error text in the pytest failure message.
        assert "Errors found - details follow:" == 0, errors
def removeIPs(self):
    """Flush all IPv4 and global-scope IPv6 addresses from every
    interface of this router.

    Best-effort: any failure is logged and swallowed rather than raised.
    """
    for ifname in self.intfNames():
        try:
            self.intf_ip_cmd(ifname, "ip -4 address flush " + ifname)
            self.intf_ip_cmd(
                ifname, "ip -6 address flush " + ifname + " scope global"
            )
        except Exception as exc:
            logger.error("%s can't remove IPs %s", self, str(exc))
            # assert False, "can't remove IPs %s" % str(ex)
def checkCapability(self, daemon, param):
    """Return False if `daemon` does not advertise `param` in its help text.

    A None `param` is trivially accepted.  Dashes are stripped from
    `param` before grepping the daemon's `-h` output.
    """
    if param is None:
        return True

    binary = os.path.join(self.daemondir, daemon)
    needle = param.replace("-", "")
    help_out = self.cmd("{0} -h | grep {1}".format(binary, needle))
    return needle in help_out
def loadConf(self, daemon, source=None, param=None):
    """Enabled and set config for a daemon.

    Arranges for loading of daemon configuration from the specified source. Possible
    `source` values are `None` for an empty config file, a path name which is used
    directly, or a file name with no path components which is first looked for
    directly and then looked for under a sub-directory named after router.

    NOTE(review): several original lines are elided in this excerpt;
    elision points are marked below.
    """
    # Unfortunately this API allows for source to not exist for any and all routers.
    # [lines elided in this excerpt]
    source = f"{daemon}.conf"
    # [lines elided in this excerpt]
    head, tail = os.path.split(source)
    if not head and not self.path_exists(tail):
        script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
        router_relative = os.path.join(script_dir, self.name, tail)
        if self.path_exists(router_relative):
            source = router_relative
            # [lines elided in this excerpt]
            "using router relative configuration: {}".format(source)

    # print "Daemons before:", self.daemons
    if daemon in self.daemons.keys() or daemon == "frr":
        # [lines elided in this excerpt]
        self.unified_config = 1
        # [lines elided in this excerpt]
        self.daemons[daemon] = 1
        if param is not None:
            self.daemons_options[daemon] = param
        conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
        if source is None or not os.path.exists(source):
            if daemon == "frr" or not self.unified_config:
                self.cmd_raises("rm -f " + conf_file)
            self.cmd_raises("touch " + conf_file)
        # [lines elided in this excerpt]
        # copy zebra.conf to mgmtd folder, which can be used during startup
        if daemon == "zebra":
            conf_file_mgmt = "/etc/{}/{}.conf".format(self.routertype, "mgmtd")
            self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
        self.cmd_raises("cp {} {}".format(source, conf_file))

        if not (self.unified_config or daemon == "frr"):
            self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
            self.cmd_raises("chmod 664 {}".format(conf_file))

        if (daemon == "snmpd") and (self.routertype == "frr"):
            # /etc/snmp is private mount now
            self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
            self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

        if (daemon == "zebra") and (self.daemons["mgmtd"] == 0):
            # Add mgmtd with zebra - if it exists
            mgmtd_path = os.path.join(self.daemondir, "mgmtd")
            if os.path.isfile(mgmtd_path):
                self.daemons["mgmtd"] = 1
                self.daemons_options["mgmtd"] = ""
                # Auto-Started mgmtd has no config, so it will read from zebra config

        if (daemon == "zebra") and (self.daemons["staticd"] == 0):
            # Add staticd with zebra - if it exists
            staticd_path = os.path.join(self.daemondir, "staticd")
            if os.path.isfile(staticd_path):
                self.daemons["staticd"] = 1
                self.daemons_options["staticd"] = ""
                # Auto-Started staticd has no config, so it will read from zebra config
    # [lines elided in this excerpt]
    logger.warning("No daemon {} known".format(daemon))
    # print "Daemons after:", self.daemons
def runInWindow(self, cmd, title=None):
    """Launch `cmd` in its own terminal window.

    Thin wrapper over `run_in_window`; returns whatever it returns.
    """
    window_title = title
    return self.run_in_window(cmd, window_title)
def startRouter(self, tgen=None):
    """Prepare the host and launch this router's daemons.

    Returns a skip-reason message (e.g. missing daemon binary or kernel
    support) so callers can skip the test instead of failing.

    NOTE(review): several original lines are elided in this excerpt;
    elision points are marked below.
    """
    if self.unified_config:
        # [lines elided in this excerpt]
        'echo "service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
        # [lines elided in this excerpt]
        # Disable integrated-vtysh-config
        # [lines elided in this excerpt]
        'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
    # [lines elided in this excerpt]
    "chown %s:%svty /etc/%s/vtysh.conf"
    % (self.routertype, self.routertype, self.routertype)

    # TODO remove the following lines after all tests are migrated to Topogen.
    # Try to find relevant old logfiles in /tmp and delete them
    map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
    # Remove old core files
    map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
    # Remove IP addresses from OS first - we have them in zebra.conf
    # [lines elided in this excerpt]
    # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
    # No error - but return message and skip all the tests
    if self.daemons["ldpd"] == 1:
        ldpd_path = os.path.join(self.daemondir, "ldpd")
        if not os.path.isfile(ldpd_path):
            logger.info("LDP Test, but no ldpd compiled or installed")
            return "LDP Test, but no ldpd compiled or installed"

        if version_cmp(platform.release(), "4.5") < 0:
            logger.info("LDP Test need Linux Kernel 4.5 minimum")
            return "LDP Test need Linux Kernel 4.5 minimum"
        # Check if have mpls
        # [lines elided in this excerpt]
        self.hasmpls = tgen.hasmpls
        if self.hasmpls != True:
            # [lines elided in this excerpt]
            "LDP/MPLS Tests will be skipped, platform missing module(s)"
        # [lines elided in this excerpt]
        # Test for MPLS Kernel modules available
        self.hasmpls = False
        if not module_present("mpls-router"):
            # [lines elided in this excerpt]
            "MPLS tests will not run (missing mpls-router kernel module)"
        elif not module_present("mpls-iptunnel"):
            # [lines elided in this excerpt]
            "MPLS tests will not run (missing mpls-iptunnel kernel module)"
        # [lines elided in this excerpt]
        if self.hasmpls != True:
            return "LDP/MPLS Tests need mpls kernel modules"

    # Really want to use sysctl_atleast here, but only when MPLS is actually being
    # [lines elided in this excerpt]
    self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

    if g_pytest_config.name_in_option_list(self.name, "--shell"):
        self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)

    if self.daemons["eigrpd"] == 1:
        eigrpd_path = os.path.join(self.daemondir, "eigrpd")
        if not os.path.isfile(eigrpd_path):
            logger.info("EIGRP Test, but no eigrpd compiled or installed")
            return "EIGRP Test, but no eigrpd compiled or installed"

    if self.daemons["bfdd"] == 1:
        bfdd_path = os.path.join(self.daemondir, "bfdd")
        if not os.path.isfile(bfdd_path):
            logger.info("BFD Test, but no bfdd compiled or installed")
            return "BFD Test, but no bfdd compiled or installed"

    status = self.startRouterDaemons(tgen=tgen)

    if g_pytest_config.name_in_option_list(self.name, "--vtysh"):
        self.run_in_window("vtysh", title="vt-%s" % self.name)

    if self.unified_config:
        self.cmd("vtysh -f /etc/frr/frr.conf")
    # [lines elided in this excerpt: presumably `return status`]
def getStdErr(self, daemon):
    """Return the captured stderr of `daemon` (its `<daemon>.err` file)."""
    stream = "err"
    return self.getLog(stream, daemon)
def getStdOut(self, daemon):
    """Return the captured stdout of `daemon` (its `<daemon>.out` file)."""
    stream = "out"
    return self.getLog(stream, daemon)
def getLog(self, log, daemon):
    """Return the contents of this router's `<daemon>.<log>` capture file.

    `log` is the capture suffix ("out" or "err", as used by
    getStdOut/getStdErr).

    NOTE(review): the lines that read and return the file contents are
    elided in this excerpt.
    """
    filename = "{}/{}/{}.{}".format(self.logdir, self.name, daemon, log)
    # [lines elided in this excerpt]
    with open(filename) as file:
        # [lines elided in this excerpt: read body and return]
def startRouterDaemons(self, daemons=None, tgen=None):
    "Starts FRR daemons for this router."
    # Launches mgmtd, zebra and staticd first (in that order), then the
    # remaining configured daemons, honoring the gdb/perf/valgrind/strace
    # debug options from the pytest configuration, and finally waits for
    # each daemon's pid/vty files to appear.
    # NOTE(review): numerous original lines are elided in this excerpt;
    # elision points are marked below.

    asan_abort = bool(g_pytest_config.option.asan_abort)
    gdb_breakpoints = g_pytest_config.get_option_list("--gdb-breakpoints")
    gdb_daemons = g_pytest_config.get_option_list("--gdb-daemons")
    gdb_routers = g_pytest_config.get_option_list("--gdb-routers")
    valgrind_extra = bool(g_pytest_config.option.valgrind_extra)
    valgrind_memleaks = bool(g_pytest_config.option.valgrind_memleaks)
    strace_daemons = g_pytest_config.get_option_list("--strace-daemons")

    # Get global bundle data
    if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
        # Copy global value if was covered by namespace mount
        if os.path.exists("/etc/frr/support_bundle_commands.conf"):
            with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                bundle_data = rf.read()
            # [lines elided in this excerpt]
            "cat > /etc/frr/support_bundle_commands.conf",

    # Starts actual daemons without init (ie restart)
    # cd to per node directory
    self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
    self.set_cwd("{}/{}".format(self.logdir, self.name))
    self.cmd("umask 000")

    # Re-enable to allow for report per run
    self.reportCores = True

    # XXX: glue code forward ported from removed function.
    if self.version is None:
        self.version = self.cmd(
            os.path.join(self.daemondir, "bgpd") + " -v"
        # [lines elided in this excerpt]
        logger.info("{}: running version: {}".format(self.name, self.version))

    # [lines elided in this excerpt]
    perf_options = g_pytest_config.get_option("--perf-options", "-g")
    for perf in g_pytest_config.get_option("--perf", []):
        # [lines elided in this excerpt]
        daemon, routers = perf.split(",", 1)
        perfds[daemon] = routers.split(",")
        # [lines elided in this excerpt]
        perfds[daemon] = ["all"]

    # [lines elided in this excerpt]
    for logd in g_pytest_config.get_option("--logd", []):
        # [lines elided in this excerpt]
        daemon, routers = logd.split(",", 1)
        logd_options[daemon] = routers.split(",")
        # [lines elided in this excerpt]
        logd_options[daemon] = ["all"]

    # If `daemons` was specified then some upper API called us with
    # specific daemons, otherwise just use our own configuration.
    # [lines elided in this excerpt]
    if daemons is not None:
        daemons_list = daemons
    # [lines elided in this excerpt]
    # Append all daemons configured.
    for daemon in self.daemons:
        if self.daemons[daemon] == 1:
            daemons_list.append(daemon)

    check_daemon_files = []

    def start_daemon(daemon, extra_opts=None):
        # Launch one daemon with the appropriate environment/wrapper
        # (ASAN, valgrind, strace, gdb, or perf) and register the files
        # whose appearance signals a successful start.
        daemon_opts = self.daemons_options.get(daemon, "")

        # get pid and vty filenames and remove the files
        m = re.match(r"(.* |^)-n (\d+)( ?.*|$)", daemon_opts)
        dfname = daemon if not m else "{}-{}".format(daemon, m.group(2))
        runbase = "/var/run/{}/{}".format(self.routertype, dfname)
        # If this is a new system bring-up remove the pid/vty files, otherwise
        # do not since apparently presence of the pidfile impacts BGP GR
        self.cmd_status("rm -f {0}.pid {0}.vty".format(runbase))

        rediropt = " > {0}.out 2> {0}.err".format(daemon)
        if daemon == "snmpd":
            binary = "/usr/sbin/snmpd"
            # [lines elided in this excerpt]
            cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
            ) + "{}.pid -x /etc/frr/agentx".format(runbase)
            # check_daemon_files.append(runbase + ".pid")
        # [lines elided in this excerpt]
        binary = os.path.join(self.daemondir, daemon)
        check_daemon_files.extend([runbase + ".pid", runbase + ".vty"])

        cmdenv = "ASAN_OPTIONS="
        # [lines elided in this excerpt]
        cmdenv += "abort_on_error=1:"
        cmdenv += "log_path={0}/{1}.asan.{2} ".format(
            self.logdir, self.name, daemon
        # [lines elided in this excerpt]
        if valgrind_memleaks:
            this_dir = os.path.dirname(
                os.path.abspath(os.path.realpath(__file__))
            # [lines elided in this excerpt]
            supp_file = os.path.abspath(
                os.path.join(this_dir, "../../../tools/valgrind.supp")
            # [lines elided in this excerpt]
            cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                daemon, self.logdir, self.name, supp_file
            # [lines elided in this excerpt]
            " --gen-suppressions=all --expensive-definedness-checks=yes"
        # [lines elided in this excerpt]
        elif daemon in strace_daemons or "all" in strace_daemons:
            cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                daemon, self.logdir, self.name
        # [lines elided in this excerpt]
        cmdopt = "{} --command-log-always ".format(daemon_opts)
        cmdopt += "--log file:{}.log --log-level debug".format(daemon)

        if daemon in logd_options:
            logdopt = logd_options[daemon]
            if "all" in logdopt or self.name in logdopt:
                tail_log_files.append(
                    "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                # [lines elided in this excerpt]
        cmdopt += " " + extra_opts

        # [lines elided in this excerpt: gdb-selection condition]
        (gdb_routers or gdb_daemons)
        # [lines elided in this excerpt]
        not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
        # [lines elided in this excerpt]
        and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
        # [lines elided in this excerpt]
        if daemon == "snmpd":
            # [lines elided in this excerpt]
            gdbcmd = "sudo -E gdb " + binary
            # [lines elided in this excerpt]
            gdbcmd += " -ex 'set breakpoint pending on'"
            for bp in gdb_breakpoints:
                gdbcmd += " -ex 'b {}'".format(bp)
            gdbcmd += " -ex 'run {}'".format(cmdopt)
            # [lines elided in this excerpt]
            self.run_in_window(gdbcmd, daemon)
            # [lines elided in this excerpt]
            "%s: %s %s launched in gdb window", self, self.routertype, daemon
        elif daemon in perfds and (
            self.name in perfds[daemon] or "all" in perfds[daemon]
            # [lines elided in this excerpt]
            ["perf record {} --".format(perf_options), binary, cmdopt]
            # [lines elided in this excerpt]
            self.perf_daemons[daemon] = p
            if p.poll() and p.returncode:
                # [lines elided in this excerpt]
                '%s: Failed to launch "%s" (%s) with perf using: %s',
                # [lines elided in this excerpt]
                "%s: %s %s started with perf", self, self.routertype, daemon
        # [lines elided in this excerpt]
        if daemon != "snmpd":
            # [lines elided in this excerpt]
            self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
        except subprocess.CalledProcessError as error:
            # [lines elided in this excerpt]
            '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
            # [lines elided in this excerpt]
            '\n:stdout: "{}"'.format(error.stdout.strip())
            # [lines elided in this excerpt]
            '\n:stderr: "{}"'.format(error.stderr.strip())
        # [lines elided in this excerpt]
        logger.debug("%s: %s %s started", self, self.routertype, daemon)

    # Start mgmtd first
    if "mgmtd" in daemons_list:
        start_daemon("mgmtd")
        while "mgmtd" in daemons_list:
            daemons_list.remove("mgmtd")

    # Start Zebra after mgmtd
    if "zebra" in daemons_list:
        start_daemon("zebra", "-s 90000000")
        while "zebra" in daemons_list:
            daemons_list.remove("zebra")

    # Start staticd next if required
    if "staticd" in daemons_list:
        start_daemon("staticd")
        while "staticd" in daemons_list:
            daemons_list.remove("staticd")

    if "snmpd" in daemons_list:
        # Give zebra a chance to configure interface addresses that snmpd daemon
        # [lines elided in this excerpt]
        start_daemon("snmpd")
        while "snmpd" in daemons_list:
            daemons_list.remove("snmpd")

    # Now start all the other daemons
    for daemon in daemons_list:
        if self.daemons[daemon] == 0:
            # [lines elided in this excerpt]
        start_daemon(daemon)

    # Check if daemons are running.
    wait_time = 30 if (gdb_routers or gdb_daemons) else 10
    timeout = Timeout(wait_time)
    for remaining in timeout:
        if not check_daemon_files:
            # [lines elided in this excerpt]
        check = check_daemon_files[0]
        if self.path_exists(check):
            check_daemon_files.pop(0)
            # [lines elided in this excerpt]
        self.logger.debug("Waiting {}s for {} to appear".format(remaining, check))

    if check_daemon_files:
        assert False, "Timeout({}) waiting for {} to appear on {}".format(
            wait_time, check_daemon_files[0], self.name
        # [lines elided in this excerpt]

    # Update the permissions on the log files
    self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
    self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

    if "frr" in logd_options:
        logdopt = logd_options["frr"]
        if "all" in logdopt or self.name in logdopt:
            tail_log_files.append("{}/{}/frr.log".format(self.logdir, self.name))

    for tailf in tail_log_files:
        self.run_in_window("tail -n10000 -F " + tailf, title=tailf, background=True)
def pid_exists(self, pid):
    """Best-effort liveness check for `pid` within this node.

    NOTE(review): several original lines are elided in this excerpt;
    elision points are marked below.
    """
    # [lines elided in this excerpt]
    # If we are not using PID namespaces then we will be a parent of the pid,
    # otherwise the init process of the PID namespace will have reaped the proc.
    os.waitpid(pid, os.WNOHANG)
    # [lines elided in this excerpt]

    rc, o, e = self.cmd_status("kill -0 " + str(pid), warn=False)
    # kill -0 probes the pid without signaling it; a failure whose stderr
    # does not mention "No such process" (e.g. EPERM) still counts as alive.
    return rc == 0 or "No such process" not in e
def killRouterDaemons(
    self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
):
    """SIGKILL the named daemons on this router and clean their pidfiles.

    wait: poll until the killed daemons are gone (with a second kill
        round if needed).
    assertOnError: fail the test if core/error reports are found.
    minErrorVersion: error reports are ignored for FRR older than this.

    NOTE(review): numerous original lines are elided in this excerpt;
    elision points are marked below.
    """
    # Daemons(user specified daemon only) using SIGKILL
    rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
    # [lines elided in this excerpt]
    daemonsNotRunning = []
    if re.search(r"No such file or directory", rundaemons):
        # [lines elided in this excerpt]
    for daemon in daemons:
        if rundaemons is not None and daemon in rundaemons:
            # [lines elided in this excerpt]
            dmns = rundaemons.split("\n")
            # Exclude empty string at end of list
            # [lines elided in this excerpt]
            if re.search(r"%s" % daemon, d):
                daemonpidfile = d.rstrip()
                daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip()
                if daemonpid.isdigit() and self.pid_exists(int(daemonpid)):
                    # [lines elided in this excerpt]
                    "{}: killing {}".format(
                    # [lines elided in this excerpt]
                    os.path.basename(daemonpidfile.rsplit(".", 1)[0]),
                    # [lines elided in this excerpt]
                    self.cmd_status("kill -KILL {}".format(daemonpid))
                    if self.pid_exists(int(daemonpid)):
                        # [lines elided in this excerpt]
                        while wait and numRunning > 0:
                            # [lines elided in this excerpt]
                            "{}: waiting for {} daemon to be stopped".format(
                            # [lines elided in this excerpt]
                            # 2nd round of kill if daemons didn't exit
                            # [lines elided in this excerpt]
                            if re.search(r"%s" % daemon, d):
                                daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                                if daemonpid.isdigit() and self.pid_exists(
                                    # [lines elided in this excerpt]
                                    "{}: killing {}".format(
                                    # [lines elided in this excerpt]
                                    d.rstrip().rsplit(".", 1)[0]
                                    # [lines elided in this excerpt]
                                    "kill -KILL {}".format(daemonpid)
                    # [lines elided in this excerpt]
                    if daemonpid.isdigit() and not self.pid_exists(
                        # [lines elided in this excerpt]
                        self.cmd("rm -- {}".format(daemonpidfile))
            # [lines elided in this excerpt]
            errors = self.checkRouterCores(reportOnce=True)
            if self.checkRouterVersion("<", minErrorVersion):
                # ignore errors in old versions
                # [lines elided in this excerpt]
            if assertOnError and len(errors) > 0:
                # Always-false assert: surfaces the accumulated error text
                # in the pytest failure message.
                assert "Errors found - details follow:" == 0, errors
        # [lines elided in this excerpt]
        daemonsNotRunning.append(daemon)
    if len(daemonsNotRunning) > 0:
        errors = errors + "Daemons are not running", daemonsNotRunning
def checkRouterCores(self, reportLeaks=True, reportOnce=False):
    """Scan enabled daemons for core dumps, memory-leak reports and
    AddressSanitizer errors; accumulate and return the report text.

    reportLeaks: also scan stderr captures for "memstats" leak output.
    reportOnce: report at most once per run (tracked via self.reportCores).

    NOTE(review): several original lines are elided in this excerpt;
    elision points are marked below.
    """
    if reportOnce and not self.reportCores:
        # [lines elided in this excerpt]
    for daemon in self.daemons:
        if self.daemons[daemon] == 1:
            # Look for core file
            corefiles = glob.glob(
                "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
            # [lines elided in this excerpt]
            if len(corefiles) > 0:
                backtrace = gdb_core(self, daemon, corefiles)
                # [lines elided in this excerpt]
                + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                % (self.name, daemon, backtrace)
            # [lines elided in this excerpt]
            log = self.getStdErr(daemon)
            if "memstats" in log:
                # [lines elided in this excerpt]
                "%s: %s has memory leaks:\n" % (self.name, daemon)
                # [lines elided in this excerpt]
                traces = traces + "\n%s: %s has memory leaks:\n" % (
                # [lines elided in this excerpt]
                log = re.sub("core_handler: ", "", log)
                # [lines elided in this excerpt]
                r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                # [lines elided in this excerpt]
                log = re.sub("memstats: ", " ", log)
                sys.stderr.write(log)

            # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
            if checkAddressSanitizerError(
                self.getStdErr(daemon), self.name, daemon, self.logdir
            # [lines elided in this excerpt]
            "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
            # [lines elided in this excerpt]
            traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
            # [lines elided in this excerpt]
    self.reportCores = False
    # [lines elided in this excerpt]
def checkRouterRunning(self):
    "Check if router daemons are running and collect crashinfo they don't run"
    # Returns an error-message string for the first daemon found not
    # running (or killed by ASAN); collects cores/log tails to stderr.
    # NOTE(review): several original lines are elided in this excerpt;
    # elision points are marked below.

    # [lines elided in this excerpt]
    daemonsRunning = self.cmd(
        'vtysh -c "show logging" | grep "Logging configuration for"'
    # [lines elided in this excerpt]
    # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
    if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
        return "%s: vtysh killed by AddressSanitizer" % (self.name)

    for daemon in self.daemons:
        if daemon == "snmpd":
            # [lines elided in this excerpt]
        if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
            sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
            if daemon == "staticd":
                # [lines elided in this excerpt]
                "You may have a copy of staticd installed but are attempting to test against\n"
                # [lines elided in this excerpt]
                "a version of FRR that does not have staticd, please cleanup the install dir\n"
            # [lines elided in this excerpt]
            # Look for core file
            corefiles = glob.glob(
                "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
            # [lines elided in this excerpt]
            if len(corefiles) > 0:
                gdb_core(self, daemon, corefiles)
            # [lines elided in this excerpt]
            # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
            "{}/{}/{}.log".format(self.logdir, self.name, daemon)
            # [lines elided in this excerpt]
            log_tail = subprocess.check_output(
                # [lines elided in this excerpt]
                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                    self.logdir, self.name, daemon
                # [lines elided in this excerpt]
            "\nFrom %s %s %s log file:\n"
            % (self.routertype, self.name, daemon)
            # [lines elided in this excerpt]
            sys.stderr.write("%s\n" % log_tail)

            # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
            if checkAddressSanitizerError(
                self.getStdErr(daemon), self.name, daemon, self.logdir
            # [lines elided in this excerpt]
            return "%s: Daemon %s not running - killed by AddressSanitizer" % (
            # [lines elided in this excerpt]
            return "%s: Daemon %s not running" % (self.name, daemon)
    # [lines elided in this excerpt]
def checkRouterVersion(self, cmpop, version):
    """
    Compares router version using operation `cmpop` with `version`.
    Valid `cmpop` values:
    * `>=`: has the same version or greater
    * '>': has greater version
    * '=': has the same version
    * '<': has a lesser version
    * '<=': has the same version or lesser

    Usage example: router.checkRouterVersion('>', '1.0')
    """
    # NOTE(review): the lines mapping `result` to the requested operator
    # (and the returns) are elided in this excerpt.

    # Make sure we have version information first
    if self.version == None:
        self.version = self.cmd(
            os.path.join(self.daemondir, "bgpd") + " -v"
        # [lines elided in this excerpt]
        logger.info("{}: running version: {}".format(self.name, self.version))

    rversion = self.version
    if rversion == None:
        # [lines elided in this excerpt]

    result = version_cmp(rversion, version)
    # [lines elided in this excerpt]
def get_ipv6_linklocal(self):
    "Get LinkLocal Addresses from interfaces"
    # Parses `ip -6 address` output and returns a list of
    # [interface-name, link-local-address] pairs; when an interface has
    # more than one link-local, subsequent entries get a "-<count>" name
    # suffix.
    # NOTE(review): several original lines are elided in this excerpt;
    # elision points are marked below.

    # [lines elided in this excerpt]
    ifaces = self.cmd("ip -6 address")
    # Fix newlines (make them all the same)
    ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
    # [lines elided in this excerpt]
    m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
    # [lines elided in this excerpt]
    interface = m.group(1)
    # [lines elided in this excerpt]
    "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
    # [lines elided in this excerpt]
    ll_per_if_count += 1
    if ll_per_if_count > 1:
        linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
    # [lines elided in this excerpt]
    linklocal += [[interface, local]]
    # [lines elided in this excerpt]
def daemon_available(self, daemon):
    "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
    # NOTE(review): the early-return lines are elided in this excerpt;
    # elision points are marked below.

    daemon_path = os.path.join(self.daemondir, daemon)
    if not os.path.isfile(daemon_path):
        # [line elided in this excerpt]
    if daemon == "ldpd":
        # ldpd additionally needs kernel >= 4.5 and the MPLS modules.
        if version_cmp(platform.release(), "4.5") < 0:
            # [line elided in this excerpt]
        if not module_present("mpls-router", load=False):
            # [line elided in this excerpt]
        if not module_present("mpls-iptunnel", load=False):
            # [line elided in this excerpt]
    # [lines elided in this excerpt]
def get_routertype(self):
    """Return the type of this router (e.g. "frr")."""
    rtype = self.routertype
    return rtype
def report_memory_leaks(self, filename_prefix, testscript):
    "Report Memory Leaks to file prefixed with given string"
    # Scans each enabled daemon's stderr capture for "memstats" output and
    # appends a markdown-style report to <filename_prefix><testscript>.txt
    # (with the ".py" suffix stripped from testscript).
    # NOTE(review): several original lines are elided in this excerpt;
    # elision points are marked below.

    # [lines elided in this excerpt]
    filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
    for daemon in self.daemons:
        if self.daemons[daemon] == 1:
            log = self.getStdErr(daemon)
            if "memstats" in log:
                # [lines elided in this excerpt]
                "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                # [lines elided in this excerpt]
                # Check if file already exists
                fileexists = os.path.isfile(filename)
                leakfile = open(filename, "a")
                # [lines elided in this excerpt]
                # New file - add header
                # [lines elided in this excerpt]
                "# Memory Leak Detection for topotest %s\n\n"
                # [lines elided in this excerpt]
                leakfile.write("## Router %s\n" % self.name)
                leakfile.write("### Process %s\n" % daemon)
                log = re.sub("core_handler: ", "", log)
                # [lines elided in this excerpt]
                r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                # [lines elided in this excerpt]
                log = re.sub("memstats: ", " ", log)
                # [lines elided in this excerpt]
                leakfile.write("\n")
    # [lines elided in this excerpt]
2291 """Convert string to unicode, depending on python version"""
2292 if sys
.version_info
[0] > 2:
2295 return unicode(s
) # pylint: disable=E0602
2299 return isinstance(o
, Mapping
)