]>
Commit | Line | Data |
---|---|---|
594b1259 MW |
1 | #!/usr/bin/env python |
2 | ||
3 | # | |
4 | # topotest.py | |
5 | # Library of helper functions for NetDEF Topology Tests | |
6 | # | |
7 | # Copyright (c) 2016 by | |
8 | # Network Device Education Foundation, Inc. ("NetDEF") | |
9 | # | |
10 | # Permission to use, copy, modify, and/or distribute this software | |
11 | # for any purpose with or without fee is hereby granted, provided | |
12 | # that the above copyright notice and this permission notice appear | |
13 | # in all copies. | |
14 | # | |
15 | # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES | |
16 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
17 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR | |
18 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY | |
19 | # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, | |
20 | # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS | |
21 | # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | |
22 | # OF THIS SOFTWARE. | |
23 | # | |
24 | ||
49581587 | 25 | import difflib |
50c40bde | 26 | import errno |
fd858290 | 27 | import functools |
594b1259 | 28 | import glob |
49581587 CH |
29 | import json |
30 | import os | |
31 | import pdb | |
32 | import platform | |
33 | import re | |
34 | import resource | |
35 | import signal | |
594b1259 | 36 | import subprocess |
49581587 | 37 | import sys |
1fca63c1 | 38 | import tempfile |
570f25d8 | 39 | import time |
49581587 | 40 | from copy import deepcopy |
594b1259 | 41 | |
49581587 | 42 | import lib.topolog as topolog |
6c131bd3 RZ |
43 | from lib.topolog import logger |
44 | ||
04ce2b97 RZ |
45 | if sys.version_info[0] > 2: |
46 | import configparser | |
c8e5983d | 47 | from collections.abc import Mapping |
04ce2b97 RZ |
48 | else: |
49 | import ConfigParser as configparser | |
c8e5983d | 50 | from collections import Mapping |
04ce2b97 | 51 | |
49581587 CH |
52 | from lib import micronet |
53 | from lib.micronet_compat import Node | |
594b1259 | 54 | |
# Module-level dict of extra run-time configuration; empty here, presumably
# populated by the test harness at startup -- TODO confirm against callers.
g_extra_config = {}
701a0192 | 56 | |
a53c08bc | 57 | |
49581587 CH |
def get_logs_path(rundir):
    """Return the per-test log directory, rooted at `rundir`."""
    return os.path.join(rundir, topolog.get_test_logdir())
61 | ||
0b25370e | 62 | |
79f6fdeb | 63 | def gdb_core(obj, daemon, corefiles): |
701a0192 | 64 | gdbcmds = """ |
79f6fdeb DL |
65 | info threads |
66 | bt full | |
67 | disassemble | |
68 | up | |
69 | disassemble | |
70 | up | |
71 | disassemble | |
72 | up | |
73 | disassemble | |
74 | up | |
75 | disassemble | |
76 | up | |
77 | disassemble | |
701a0192 | 78 | """ |
79 | gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")] | |
79f6fdeb DL |
80 | gdbcmds = [item for sl in gdbcmds for item in sl] |
81 | ||
82 | daemon_path = os.path.join(obj.daemondir, daemon) | |
83 | backtrace = subprocess.check_output( | |
701a0192 | 84 | ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds |
79f6fdeb DL |
85 | ) |
86 | sys.stderr.write( | |
701a0192 | 87 | "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon) |
79f6fdeb DL |
88 | ) |
89 | sys.stderr.write("%s" % backtrace) | |
90 | return backtrace | |
787e7624 | 91 | |
701a0192 | 92 | |
3668ed8d RZ |
class json_cmp_result(object):
    "Accumulates JSON comparison errors to build readable assertion messages."

    def __init__(self):
        # One recorded error message per list entry.
        self.errors = []

    def add_error(self, error):
        "Split `error` into lines and record each one."
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Tell whether any error has been recorded."
        return bool(self.errors)

    def gen_report(self):
        "Return the report as a list of lines, headline first."
        return ["Generated JSON diff error report:", ""] + self.errors

    def __str__(self):
        body = "\n".join(self.errors)
        return "Generated JSON diff error report:\n\n\n" + body + "\n\n"
7fe06d55 | 116 | |
da63d5b3 | 117 | |
849224d4 | 118 | def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")): |
7bd28cfc | 119 | """ |
849224d4 | 120 | Internal workhorse which compares two JSON data structures and generates an error report suited to be read by a human eye. |
7bd28cfc | 121 | """ |
849224d4 G |
122 | |
123 | def dump_json(v): | |
124 | if isinstance(v, (dict, list)): | |
125 | return "\t" + "\t".join( | |
126 | json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True) | |
787e7624 | 127 | ) |
849224d4 G |
128 | else: |
129 | return "'{}'".format(v) | |
130 | ||
131 | def json_type(v): | |
132 | if isinstance(v, (list, tuple)): | |
133 | return "Array" | |
134 | elif isinstance(v, dict): | |
135 | return "Object" | |
136 | elif isinstance(v, (int, float)): | |
137 | return "Number" | |
138 | elif isinstance(v, bool): | |
139 | return "Boolean" | |
140 | elif isinstance(v, str): | |
141 | return "String" | |
142 | elif v == None: | |
143 | return "null" | |
144 | ||
145 | def get_errors(other_acc): | |
146 | return other_acc[1] | |
147 | ||
148 | def get_errors_n(other_acc): | |
149 | return other_acc[0] | |
150 | ||
151 | def add_error(acc, msg, points=1): | |
152 | return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg)) | |
153 | ||
154 | def merge_errors(acc, other_acc): | |
155 | return (acc[0] + other_acc[0], acc[1] + other_acc[1]) | |
156 | ||
157 | def add_idx(idx): | |
158 | return "{}[{}]".format(path, idx) | |
159 | ||
160 | def add_key(key): | |
161 | return "{}->{}".format(path, key) | |
162 | ||
163 | def has_errors(other_acc): | |
164 | return other_acc[0] > 0 | |
165 | ||
166 | if d2 == "*" or ( | |
167 | not isinstance(d1, (list, dict)) | |
168 | and not isinstance(d2, (list, dict)) | |
169 | and d1 == d2 | |
170 | ): | |
171 | return acc | |
172 | elif ( | |
173 | not isinstance(d1, (list, dict)) | |
174 | and not isinstance(d2, (list, dict)) | |
175 | and d1 != d2 | |
176 | ): | |
177 | acc = add_error( | |
178 | acc, | |
179 | "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2), | |
787e7624 | 180 | ) |
849224d4 G |
181 | elif ( |
182 | isinstance(d1, list) | |
183 | and isinstance(d2, list) | |
184 | and ((len(d2) > 0 and d2[0] == "__ordered__") or exact) | |
185 | ): | |
186 | if not exact: | |
187 | del d2[0] | |
188 | if len(d1) != len(d2): | |
189 | acc = add_error( | |
190 | acc, | |
191 | "d1 has Array of length {} but in d2 it is of length {}".format( | |
192 | len(d1), len(d2) | |
193 | ), | |
787e7624 | 194 | ) |
849224d4 G |
195 | else: |
196 | for idx, v1, v2 in zip(range(0, len(d1)), d1, d2): | |
197 | acc = merge_errors( | |
198 | acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx)) | |
199 | ) | |
200 | elif isinstance(d1, list) and isinstance(d2, list): | |
201 | if len(d1) < len(d2): | |
202 | acc = add_error( | |
203 | acc, | |
204 | "d1 has Array of length {} but in d2 it is of length {}".format( | |
205 | len(d1), len(d2) | |
206 | ), | |
207 | ) | |
208 | else: | |
209 | for idx2, v2 in zip(range(0, len(d2)), d2): | |
210 | found_match = False | |
211 | closest_diff = None | |
212 | closest_idx = None | |
213 | for idx1, v1 in zip(range(0, len(d1)), d1): | |
b3100f6c G |
214 | tmp_v1 = deepcopy(v1) |
215 | tmp_v2 = deepcopy(v2) | |
216 | tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1)) | |
849224d4 G |
217 | if not has_errors(tmp_diff): |
218 | found_match = True | |
219 | del d1[idx1] | |
220 | break | |
221 | elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n( | |
222 | closest_diff | |
223 | ): | |
224 | closest_diff = tmp_diff | |
225 | closest_idx = idx1 | |
226 | if not found_match and isinstance(v2, (list, dict)): | |
227 | sub_error = "\n\n\t{}".format( | |
228 | "\t".join(get_errors(closest_diff).splitlines(True)) | |
229 | ) | |
230 | acc = add_error( | |
231 | acc, | |
232 | ( | |
233 | "d2 has the following element at index {} which is not present in d1: " | |
234 | + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}" | |
235 | ).format(idx2, dump_json(v2), closest_idx, sub_error), | |
236 | ) | |
237 | if not found_match and not isinstance(v2, (list, dict)): | |
238 | acc = add_error( | |
239 | acc, | |
240 | "d2 has the following element at index {} which is not present in d1: {}".format( | |
241 | idx2, dump_json(v2) | |
242 | ), | |
243 | ) | |
244 | elif isinstance(d1, dict) and isinstance(d2, dict) and exact: | |
245 | invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()] | |
246 | invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()] | |
247 | for k in invalid_keys_d1: | |
248 | acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k)) | |
249 | for k in invalid_keys_d2: | |
250 | acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k)) | |
251 | valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()] | |
252 | for k in valid_keys_intersection: | |
253 | acc = merge_errors( | |
254 | acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k)) | |
255 | ) | |
256 | elif isinstance(d1, dict) and isinstance(d2, dict): | |
257 | none_keys = [k for k, v in d2.items() if v == None] | |
258 | none_keys_present = [k for k in d1.keys() if k in none_keys] | |
259 | for k in none_keys_present: | |
260 | acc = add_error( | |
261 | acc, "d1 has key '{}' which is not supposed to be present".format(k) | |
262 | ) | |
263 | keys = [k for k, v in d2.items() if v != None] | |
264 | invalid_keys_intersection = [k for k in keys if k not in d1.keys()] | |
265 | for k in invalid_keys_intersection: | |
266 | acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k)) | |
267 | valid_keys_intersection = [k for k in keys if k in d1.keys()] | |
268 | for k in valid_keys_intersection: | |
269 | acc = merge_errors( | |
270 | acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k)) | |
271 | ) | |
272 | else: | |
273 | acc = add_error( | |
274 | acc, | |
275 | "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format( | |
276 | json_type(d1), json_type(d2) | |
277 | ), | |
278 | points=2, | |
787e7624 | 279 | ) |
a82e5f9a | 280 | |
849224d4 | 281 | return acc |
a82e5f9a | 282 | |
849224d4 G |
283 | |
def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. There are special
    parameters and notations explained below which can be used to cover rather unusual
    cases:

    * when 'exact is set to 'True' then d1 and d2 are tested for equality (including
      order within JSON Arrays)
    * using 'null' (or 'None' in Python) as JSON Object value is checking for key
      absence in d1
    * using '*' as JSON Object value or Array value is checking for presence in d1
      without checking the values
    * using '__ordered__' as first element in a JSON Array in d2 will also check the
      order when it is compared to an Array in d1
    """

    # Deep-copy both sides: the report generator may mutate its arguments.
    errors_n, errors = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if not errors_n:
        return None

    result = json_cmp_result()
    result.add_error(errors)
    return result
09e21b44 | 314 | |
a82e5f9a | 315 | |
5cffda18 RZ |
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    current = normalize_text(router.vtysh_cmd(cmd))
    return difflines(
        current,
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )
5cffda18 RZ |
326 | |
327 | ||
849224d4 | 328 | def router_json_cmp(router, cmd, data, exact=False): |
5cffda18 RZ |
329 | """ |
330 | Runs `cmd` that returns JSON data (normally the command ends with 'json') | |
331 | and compare with `data` contents. | |
332 | """ | |
849224d4 | 333 | return json_cmp(router.vtysh_cmd(cmd, isjson=True), data, exact) |
5cffda18 RZ |
334 | |
335 | ||
1fca63c1 RZ |
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    # functools.partial objects carry no __name__; dig out the wrapped one.
    if isinstance(func, functools.partial):
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    while count > 0:
        result = func()
        if result == what:
            logger.info(
                "'{}' succeeded after {:.2f} seconds".format(
                    func_name, time.time() - start_time
                )
            )
            return (True, result)
        time.sleep(wait)
        count -= 1

    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, time.time() - start_time)
    )
    return (False, result)
384 | ||
385 | ||
a6fd124a RZ |
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    # functools.partial objects carry no __name__; dig out the wrapped one.
    if isinstance(func, functools.partial):
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    while count > 0:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
            time.sleep(wait)
            count -= 1
            continue

        # Optionally also match the value; skipped when etype is NoneType
        # (the result could then only be None anyway).
        if etype is not type(None) and avalue is not None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)
440 | ||
441 | ||
594b1259 MW |
def int2dpid(dpid):
    "Convert an integer into a zero-padded 16-digit hex datapath ID string."

    try:
        hexstr = hex(dpid)[2:]
        return hexstr.rjust(16, "0")
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )
455 | ||
594b1259 | 456 | |
50c40bde MW |
def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    # Reap the process first in case it is one of our zombie children;
    # a zombie would otherwise still answer to kill(pid, 0).
    try:
        os.waitpid(pid, os.WNOHANG)
    except OSError:
        # Typically ECHILD: not our child, nothing to reap.  Was a bare
        # `except:` before; narrowed so real bugs are not swallowed.
        pass
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True
481 | ||
787e7624 | 482 | |
bc2872fd | 483 | def get_textdiff(text1, text2, title1="", title2="", **opts): |
17070436 MW |
484 | "Returns empty string if same or formatted diff" |
485 | ||
787e7624 | 486 | diff = "\n".join( |
487 | difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts) | |
488 | ) | |
17070436 MW |
489 | # Clean up line endings |
490 | diff = os.linesep.join([s for s in diff.splitlines() if s]) | |
491 | return diff | |
492 | ||
787e7624 | 493 | |
def difflines(text1, text2, title1="", title2="", **opts):
    "Normalize both texts into keepends line lists and hand off to get_textdiff."
    lines1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(1)
    lines2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(1)
    return get_textdiff(lines1, lines2, title1, title2, **opts)
1fca63c1 | 499 | |
787e7624 | 500 | |
1fca63c1 RZ |
def get_file(content):
    """
    Write `content` (a string, or a list/tuple of lines) to a fresh temporary
    file and return the file name.
    """
    if isinstance(content, (list, tuple)):
        content = "\n".join(content)
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as handle:
        handle.write(content)
    return handle.name
512 | ||
787e7624 | 513 | |
f7840f6b RZ |
def normalize_text(text):
    """
    Strips formating spaces/tabs, carriage returns and trailing whitespace.
    """
    # Collapse runs of spaces/tabs, then drop carriage returns.
    text = re.sub(r"[ \t]+", " ", text)
    text = re.sub(r"\r", "", text)

    # Drop whitespace dangling at the end of every line ...
    text = re.sub(r"[ \t]+\n", "\n", text)
    # ... and at the very end of the text.
    return text.rstrip()
527 | ||
787e7624 | 528 | |
0414a764 DS |
def is_linux():
    """
    Parses unix name output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """
    return os.uname()[0] == "Linux"
539 | ||
540 | ||
def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
                # Decode to str: without this, stderr is bytes on Python 3
                # and the "Error:" comparison below can never match.
                universal_newlines=True,
            )
            iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]

            # A VRF-incapable iproute2 complains 'Error: argument "vrf" is
            # wrong ...'; any other first word (usually a usage message)
            # means the keyword was understood.
            if iproute2_err != "Error:":
                return True
        except Exception:
            # Missing 'ip' binary or empty stderr -- treat as not capable.
            pass
    return False
564 | ||
565 | ||
cc95fbd9 | 566 | def module_present_linux(module, load): |
f2d6ce41 CF |
567 | """ |
568 | Returns whether `module` is present. | |
569 | ||
570 | If `load` is true, it will try to load it via modprobe. | |
571 | """ | |
787e7624 | 572 | with open("/proc/modules", "r") as modules_file: |
573 | if module.replace("-", "_") in modules_file.read(): | |
f2d6ce41 | 574 | return True |
787e7624 | 575 | cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module) |
f2d6ce41 CF |
576 | if os.system(cmd) != 0: |
577 | return False | |
578 | else: | |
579 | return True | |
580 | ||
787e7624 | 581 | |
cc95fbd9 DS |
def module_present_freebsd(module, load):
    """FreeBSD stub: assume any requested kernel module is available."""
    return True
584 | ||
787e7624 | 585 | |
cc95fbd9 DS |
def module_present(module, load=True):
    """Dispatch the kernel-module check to the platform-specific helper.

    Returns None on platforms other than Linux and FreeBSD (unchanged
    historical behavior).
    """
    platform_name = sys.platform
    if platform_name.startswith("linux"):
        return module_present_linux(module, load)
    if platform_name.startswith("freebsd"):
        return module_present_freebsd(module, load)
cc95fbd9 | 591 | |
787e7624 | 592 | |
4190fe1e RZ |
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got a invalid version string")

    # Numeric components of each version.
    v1g = [int(part) for part in v1m.group("whole").split(".")]
    v2g = [int(part) for part in v2m.group("whole").split(".")]

    # Zero-pad the shorter list so missing components compare as zero
    # (e.g. "1.0" equals "1.0.0").
    width = max(len(v1g), len(v2g))
    v1g += [0] * (width - len(v1g))
    v2g += [0] * (width - len(v2g))

    for left, right in zip(v1g, v2g):
        if left > right:
            return 1
        if left < right:
            return -1
    return 0
647 | ||
787e7624 | 648 | |
f5612168 PG |
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    """Shut down (ifaceaction=False) or bring up (True) an interface via vtysh."""
    str_ifaceaction = "no shutdown" if ifaceaction else "shutdown"
    if vrf_name is None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, str_ifaceaction
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, str_ifaceaction
            )
        )
    node.run(cmd)
665 | ||
787e7624 | 666 | |
b220b3c8 PG |
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets an output of 'show ip route' command. It can be used
    with comparing the output to a reference
    """
    if vrf_name is None:
        raw = node.vtysh_cmd("show ip route")
    else:
        raw = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
    # Mask out timestamps so successive runs stay comparable.
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    lines = masked.splitlines()
    # Skip the legend header (everything up to and including the
    # "o - offload failure" line) plus any leading blank lines.
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        del lines[0]
    return "\n".join(lines)
685 | ||
b220b3c8 | 686 | |
e394d9aa MS |
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """

    if vrf_name is None:
        raw = node.vtysh_cmd("show ipv6 route")
    else:
        raw = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    masked = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", raw)

    # Mask out the link-local addresses
    masked = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", masked)

    lines = masked.splitlines()
    # Skip the legend header (everything up to and including the
    # "o - offload failure" line) plus any leading blank lines.
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        del lines[0]

    return "\n".join(lines)
e394d9aa MS |
712 | |
713 | ||
2f726781 MW |
def proto_name_to_number(protocol):
    """Translate a routing protocol name into its zebra routing-table number.

    Unknown names are returned unchanged.
    """
    numbers = {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
    }
    return numbers.get(protocol, protocol)
2f726781 MW |
730 | |
731 | ||
99a7a912 RZ |
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip route")).splitlines():
        words = line.split(" ")
        # First word is the destination prefix; the rest are keyword/value
        # pairs, so walk the words pairwise.
        entry = result[words[0]] = {}
        for keyword, value in zip(words, words[1:]):
            if keyword == "dev":
                entry["dev"] = value
            elif keyword == "via":
                entry["via"] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
            elif keyword == "metric":
                entry["metric"] = value
            elif keyword == "scope":
                entry["scope"] = value
    return result
771 | ||
787e7624 | 772 | |
9375b5aa | 773 | def ip4_vrf_route(node): |
774 | """ | |
775 | Gets a structured return of the command 'ip route show vrf {0}-cust1'. | |
776 | It can be used in conjuction with json_cmp() to provide accurate assert explanations. | |
777 | ||
778 | Return example: | |
779 | { | |
780 | '10.0.1.0/24': { | |
781 | 'dev': 'eth0', | |
782 | 'via': '172.16.0.1', | |
783 | 'proto': '188', | |
784 | }, | |
785 | '10.0.2.0/24': { | |
786 | 'dev': 'eth1', | |
787 | 'proto': 'kernel', | |
788 | } | |
789 | } | |
790 | """ | |
791 | output = normalize_text( | |
701a0192 | 792 | node.run("ip route show vrf {0}-cust1".format(node.name)) |
793 | ).splitlines() | |
9375b5aa | 794 | |
795 | result = {} | |
796 | for line in output: | |
797 | columns = line.split(" ") | |
798 | route = result[columns[0]] = {} | |
799 | prev = None | |
800 | for column in columns: | |
801 | if prev == "dev": | |
802 | route["dev"] = column | |
803 | if prev == "via": | |
804 | route["via"] = column | |
805 | if prev == "proto": | |
806 | # translate protocol names back to numbers | |
807 | route["proto"] = proto_name_to_number(column) | |
808 | if prev == "metric": | |
809 | route["metric"] = column | |
810 | if prev == "scope": | |
811 | route["scope"] = column | |
812 | prev = column | |
813 | ||
814 | return result | |
815 | ||
816 | ||
99a7a912 RZ |
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    result = {}
    for line in normalize_text(node.run("ip -6 route")).splitlines():
        words = line.split(" ")
        # First word is the destination prefix; the rest are keyword/value
        # pairs, so walk the words pairwise.
        entry = result[words[0]] = {}
        for keyword, value in zip(words, words[1:]):
            if keyword == "dev":
                entry["dev"] = value
            elif keyword == "via":
                entry["via"] = value
            elif keyword == "proto":
                # translate protocol names back to numbers
                entry["proto"] = proto_name_to_number(value)
            elif keyword == "metric":
                entry["metric"] = value
            elif keyword == "pref":
                entry["pref"] = value
    return result
855 | ||
787e7624 | 856 | |
9375b5aa | 857 | def ip6_vrf_route(node): |
858 | """ | |
859 | Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'. | |
860 | It can be used in conjuction with json_cmp() to provide accurate assert explanations. | |
861 | ||
862 | Return example: | |
863 | { | |
864 | '2001:db8:1::/64': { | |
865 | 'dev': 'eth0', | |
866 | 'proto': '188', | |
867 | }, | |
868 | '2001:db8:2::/64': { | |
869 | 'dev': 'eth1', | |
870 | 'proto': 'kernel', | |
871 | } | |
872 | } | |
873 | """ | |
874 | output = normalize_text( | |
701a0192 | 875 | node.run("ip -6 route show vrf {0}-cust1".format(node.name)) |
876 | ).splitlines() | |
9375b5aa | 877 | result = {} |
878 | for line in output: | |
879 | columns = line.split(" ") | |
880 | route = result[columns[0]] = {} | |
881 | prev = None | |
882 | for column in columns: | |
883 | if prev == "dev": | |
884 | route["dev"] = column | |
885 | if prev == "via": | |
886 | route["via"] = column | |
887 | if prev == "proto": | |
888 | # translate protocol names back to numbers | |
889 | route["proto"] = proto_name_to_number(column) | |
890 | if prev == "metric": | |
891 | route["metric"] = column | |
892 | if prev == "pref": | |
893 | route["pref"] = column | |
894 | prev = column | |
895 | ||
896 | return result | |
897 | ||
898 | ||
9b7decf2 JU |
def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjuction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0"
            "from": "all"
        },
        {
            "pref": "32766"
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    result = []
    for line in normalize_text(node.run("ip rule")).splitlines():
        words = line.split(" ")

        # First word is the preference, terminated by ':'.
        rule = {"pref": words[0][:-1]}
        # The remaining words are keyword/value pairs.
        for keyword, value in zip(words, words[1:]):
            if keyword == "from":
                rule["from"] = value
            elif keyword == "to":
                rule["to"] = value
            elif keyword == "proto":
                rule["proto"] = value
            elif keyword == "iif":
                rule["iif"] = value
            elif keyword == "fwmark":
                rule["fwmark"] = value

        result.append(rule)
    return result
948 | ||
949 | ||
570f25d8 RZ |
def sleep(amount, reason=None):
    """Pause for *amount* seconds, recording the pause in the log.

    If *reason* is given it prefixes the log line, otherwise a generic
    "Sleeping" message is emitted.
    """
    message = (
        "Sleeping for {} seconds".format(amount)
        if reason is None
        else reason + " ({} seconds)".format(amount)
    )
    logger.info(message)
    time.sleep(amount)
960 | ||
787e7624 | 961 | |
def checkAddressSanitizerError(output, router, component, logdir=""):
    """Check for AddressSanitizer errors.

    Searches *output* (and, when *logdir* is given, the daemon's
    ``<component>.asan.*`` files under ``<logdir>/<router>/``) for an
    AddressSanitizer report.  A found report is written to stderr and
    appended to /tmp/AddressSanitzer.txt.

    Returns True if a report was found, False otherwise.
    """

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        # Log an ASAN report found in *output* to stderr and the summary file.
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back.
            # BUG FIX: dict_values is not subscriptable on Python 3; the old
            # `sys._current_frames().values()[0]` raised TypeError there.
            testframe = next(iter(sys._current_frames().values()))
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't find the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False
4942f298 | 1059 | |
787e7624 | 1060 | |
49581587 CH |
1061 | def _sysctl_atleast(commander, variable, min_value): |
1062 | if isinstance(min_value, tuple): | |
1063 | min_value = list(min_value) | |
1064 | is_list = isinstance(min_value, list) | |
594b1259 | 1065 | |
49581587 CH |
1066 | sval = commander.cmd_raises("sysctl -n " + variable).strip() |
1067 | if is_list: | |
1068 | cur_val = [int(x) for x in sval.split()] | |
1069 | else: | |
1070 | cur_val = int(sval) | |
1071 | ||
1072 | set_value = False | |
1073 | if is_list: | |
1074 | for i, v in enumerate(cur_val): | |
1075 | if v < min_value[i]: | |
1076 | set_value = True | |
1077 | else: | |
1078 | min_value[i] = v | |
1079 | else: | |
1080 | if cur_val < min_value: | |
1081 | set_value = True | |
1082 | if set_value: | |
1083 | if is_list: | |
1084 | valstr = " ".join([str(x) for x in min_value]) | |
1085 | else: | |
1086 | valstr = str(min_value) | |
1087 | logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr) | |
a53c08bc | 1088 | commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr)) |
594b1259 | 1089 | |
787e7624 | 1090 | |
49581587 CH |
1091 | def _sysctl_assure(commander, variable, value): |
1092 | if isinstance(value, tuple): | |
1093 | value = list(value) | |
1094 | is_list = isinstance(value, list) | |
797e8dcf | 1095 | |
49581587 CH |
1096 | sval = commander.cmd_raises("sysctl -n " + variable).strip() |
1097 | if is_list: | |
1098 | cur_val = [int(x) for x in sval.split()] | |
1099 | else: | |
1100 | cur_val = sval | |
797e8dcf | 1101 | |
49581587 CH |
1102 | set_value = False |
1103 | if is_list: | |
1104 | for i, v in enumerate(cur_val): | |
1105 | if v != value[i]: | |
1106 | set_value = True | |
1107 | else: | |
1108 | value[i] = v | |
1109 | else: | |
1110 | if cur_val != str(value): | |
1111 | set_value = True | |
1112 | ||
1113 | if set_value: | |
1114 | if is_list: | |
1115 | valstr = " ".join([str(x) for x in value]) | |
1116 | else: | |
1117 | valstr = str(value) | |
1118 | logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr) | |
a53c08bc | 1119 | commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr)) |
49581587 CH |
1120 | |
1121 | ||
def sysctl_atleast(commander, variable, min_value, raises=False):
    """Best-effort wrapper around _sysctl_atleast().

    Failures are logged (with traceback) and swallowed unless *raises* is
    True.  A None *commander* falls back to a scratch micronet Commander.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError as error:
        # CONSISTENCY FIX: sibling sysctl_assure() logs with exc_info=True;
        # without it the underlying failure's traceback was lost here.
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
            exc_info=True,
        )
        if raises:
            raise
797e8dcf | 1136 | |
787e7624 | 1137 | |
49581587 CH |
def sysctl_assure(commander, variable, value, raises=False):
    """Best-effort wrapper around _sysctl_assure().

    Failures are logged (with traceback) and swallowed unless *raises* is
    True.  A None *commander* falls back to a scratch micronet Commander.
    """
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise
1153 | ||
1154 | ||
def rlimit_atleast(rname, min_value, raises=False):
    """Raise the soft limit of rlimit *rname* to at least *min_value*.

    The hard limit is raised too if it is below *min_value*.  Failures are
    logged and swallowed unless *raises* is True.
    """
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    # BUG FIX: the resource module raises ValueError/OSError, never
    # subprocess.CalledProcessError, so the old handler could never fire.
    except (ValueError, OSError):
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise
1169 | ||
1170 | ||
def fix_netns_limits(ns):
    """Apply the standard topotest kernel tuning inside namespace *ns*.

    A flat sequence of sysctl adjustments: buffer sizes, forwarding,
    rp_filter, ARP behavior, and multipath settings needed by the tests.
    """

    # Maximum read and write socket buffer sizes
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])

    # Disable reverse-path filtering so asymmetric test topologies work.
    sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)

    sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)

    # ARP
    sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    # skip_notify_on_dev_down only exists on kernels >= 4.20.
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
1213 | ||
1214 | ||
def fix_host_limits():
    """Increase system limits.

    Host-wide (not per-namespace) tuning: process/file-descriptor rlimits,
    core-dump settings, socket buffers, neighbor-table garbage collection,
    and routing-table sizes needed for larger topologies.
    """

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    sysctl_atleast(None, "fs.file-max", 16 * 1024)
    sysctl_atleast(None, "kernel.pty.max", 16 * 1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
    sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)

    # Garbage Collection Settings for ARP and Neighbors
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
    # Hold entries for 10 minutes
    sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    # igmp
    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
    sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
49581587 CH |
1255 | |
1256 | ||
def setup_node_tmpdir(logdir, name):
    """Create a fresh per-node log directory for router *name* under *logdir*.

    Removes any stale log/valgrind/asan/core artifacts from previous runs,
    recreates ``<logdir>/<name>`` world-writable, and returns the path of
    the node's main logfile ``<logdir>/<name>.log``.
    """
    # Cleanup old log, valgrind, and core files.
    # BUG FIX: the asan glob was missing the logdir prefix ("{1}.*.asan"),
    # so stale asan logs were (not) removed relative to the CWD instead.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {0}/{1}.*.asan {0}/{1}/".format(logdir, name),
        shell=True,
    )

    # Setup the per node directory.
    nodelogdir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
    )
    logfile = "{0}/{1}.log".format(logdir, name)
    return logfile
797e8dcf | 1270 | |
594b1259 MW |
1271 | |
1272 | class Router(Node): | |
622c4996 | 1273 | "A Node with IPv4/IPv6 forwarding enabled" |
594b1259 | 1274 | |
    def __init__(self, name, **params):
        """Create a router node named *name*.

        Recognized keys in **params (all optional): ``logdir`` (per-run log
        directory), ``logger`` (pre-built logger, normally set by topogen).
        Remaining params are passed to the base Node class.
        """

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        # pytest.ini lives one directory above this library file.
        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate an unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        # Daemon bookkeeping: 1 in self.daemons means "enabled on this router".
        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        # Command prefix that enters this node's namespaces from the host.
        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))
1344 | ||
edd2bdf6 RZ |
1345 | def _config_frr(self, **params): |
1346 | "Configure FRR binaries" | |
787e7624 | 1347 | self.daemondir = params.get("frrdir") |
edd2bdf6 | 1348 | if self.daemondir is None: |
787e7624 | 1349 | self.daemondir = self.config_defaults.get("topogen", "frrdir") |
edd2bdf6 | 1350 | |
787e7624 | 1351 | zebra_path = os.path.join(self.daemondir, "zebra") |
edd2bdf6 RZ |
1352 | if not os.path.isfile(zebra_path): |
1353 | raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path)) | |
1354 | ||
2ab85530 RZ |
1355 | # pylint: disable=W0221 |
1356 | # Some params are only meaningful for the parent class. | |
594b1259 MW |
1357 | def config(self, **params): |
1358 | super(Router, self).config(**params) | |
1359 | ||
2ab85530 | 1360 | # User did not specify the daemons directory, try to autodetect it. |
787e7624 | 1361 | self.daemondir = params.get("daemondir") |
2ab85530 | 1362 | if self.daemondir is None: |
787e7624 | 1363 | self.routertype = params.get( |
1364 | "routertype", self.config_defaults.get("topogen", "routertype") | |
1365 | ) | |
622c4996 | 1366 | self._config_frr(**params) |
594b1259 | 1367 | else: |
2ab85530 | 1368 | # Test the provided path |
787e7624 | 1369 | zpath = os.path.join(self.daemondir, "zebra") |
2ab85530 | 1370 | if not os.path.isfile(zpath): |
787e7624 | 1371 | raise Exception("No zebra binary found in {}".format(zpath)) |
2ab85530 | 1372 | # Allow user to specify routertype when the path was specified. |
787e7624 | 1373 | if params.get("routertype") is not None: |
1374 | self.routertype = params.get("routertype") | |
2ab85530 | 1375 | |
787e7624 | 1376 | self.cmd("ulimit -c unlimited") |
594b1259 | 1377 | # Set ownership of config files |
787e7624 | 1378 | self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype)) |
2ab85530 | 1379 | |
    def terminate(self):
        """Stop all FRR daemons, tear the node down, and open up log perms."""
        # Stop running FRR daemons
        self.stopRouter()
        super(Router, self).terminate()
        # Make collected logs readable by the (non-root) invoking user.
        os.system("chmod -R go+rw " + self.logdir)
b0f0d980 | 1385 | |
    # Return the list of running daemons (the old comment said "count",
    # but the function returns (name, pid) tuples).
    def listDaemons(self):
        """Return a list of (daemon-name, pid) tuples for daemons that are
        actually running, based on pidfiles in /var/run/<routertype>/.

        Pidfiles whose process no longer exists are removed as a side
        effect; unreadable or malformed pidfiles are silently skipped.
        """
        ret = []
        rc, stdout, _ = self.cmd_status(
            "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
        )
        if rc:
            # No pidfiles at all -- nothing is running.
            return ret
        for d in stdout.strip().split("\n"):
            pidfile = d.strip()
            try:
                pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
                # Strip the ".pid" suffix to recover the daemon name.
                name = os.path.basename(pidfile[:-4])

                # probably not compatible with bsd.
                rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
                if rc:
                    # Process is gone: clean up the stale pidfile.
                    logger.warning(
                        "%s: %s exited leaving pidfile %s (%s)",
                        self.name,
                        name,
                        pidfile,
                        pid,
                    )
                    self.cmd("rm -- " + pidfile)
                else:
                    ret.append((name, pid))
            except (subprocess.CalledProcessError, ValueError):
                pass
        return ret
cf865d1b | 1416 | |
    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        """Stop all running FRR daemons on this router.

        Sends SIGTERM first, polls up to ~2.5s for the daemons to exit,
        then escalates to SIGBUS (to force a core) for stragglers.
        Afterwards checks for cores/errors; on FRR versions >=
        *minErrorVersion* a non-empty error report triggers an assertion
        when *assertOnError* is True.  Returns the error report string
        ("" when everything stopped cleanly).
        """
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            # Poll up to 5 times (0.5s apart) for graceful shutdown.
            for _ in range(0, 5):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        # Escalation: SIGBUS makes the daemon dump core for post-mortem.
        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            assert "Errors found - details follow:" == 0, errors
        return errors
f76774ec | 1469 | |
594b1259 MW |
1470 | def removeIPs(self): |
1471 | for interface in self.intfNames(): | |
49581587 CH |
1472 | try: |
1473 | self.intf_ip_cmd(interface, "ip address flush " + interface) | |
1474 | except Exception as ex: | |
1475 | logger.error("%s can't remove IPs %s", self, str(ex)) | |
1476 | # pdb.set_trace() | |
1477 | # assert False, "can't remove IPs %s" % str(ex) | |
8dd5077d PG |
1478 | |
1479 | def checkCapability(self, daemon, param): | |
1480 | if param is not None: | |
1481 | daemon_path = os.path.join(self.daemondir, daemon) | |
787e7624 | 1482 | daemon_search_option = param.replace("-", "") |
1483 | output = self.cmd( | |
1484 | "{0} -h | grep {1}".format(daemon_path, daemon_search_option) | |
1485 | ) | |
8dd5077d PG |
1486 | if daemon_search_option not in output: |
1487 | return False | |
1488 | return True | |
1489 | ||
1490 | def loadConf(self, daemon, source=None, param=None): | |
49581587 CH |
1491 | # Unfortunately this API allowsfor source to not exist for any and all routers. |
1492 | ||
594b1259 MW |
1493 | # print "Daemons before:", self.daemons |
1494 | if daemon in self.daemons.keys(): | |
1495 | self.daemons[daemon] = 1 | |
8dd5077d PG |
1496 | if param is not None: |
1497 | self.daemons_options[daemon] = param | |
49581587 CH |
1498 | conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon) |
1499 | if source is None or not os.path.exists(source): | |
1500 | self.cmd_raises("touch " + conf_file) | |
594b1259 | 1501 | else: |
49581587 CH |
1502 | self.cmd_raises("cp {} {}".format(source, conf_file)) |
1503 | self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file)) | |
1504 | self.cmd_raises("chmod 664 {}".format(conf_file)) | |
92be50e6 | 1505 | if (daemon == "snmpd") and (self.routertype == "frr"): |
49581587 | 1506 | # /etc/snmp is private mount now |
92be50e6 | 1507 | self.cmd('echo "agentXSocket /etc/frr/agentx" > /etc/snmp/frr.conf') |
49581587 CH |
1508 | self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf') |
1509 | ||
787e7624 | 1510 | if (daemon == "zebra") and (self.daemons["staticd"] == 0): |
a2a1134c | 1511 | # Add staticd with zebra - if it exists |
49581587 CH |
1512 | try: |
1513 | staticd_path = os.path.join(self.daemondir, "staticd") | |
1514 | except: | |
1515 | pdb.set_trace() | |
1516 | ||
a2a1134c | 1517 | if os.path.isfile(staticd_path): |
787e7624 | 1518 | self.daemons["staticd"] = 1 |
1519 | self.daemons_options["staticd"] = "" | |
2c805e6c | 1520 | # Auto-Started staticd has no config, so it will read from zebra config |
594b1259 | 1521 | else: |
787e7624 | 1522 | logger.info("No daemon {} known".format(daemon)) |
594b1259 | 1523 | # print "Daemons after:", self.daemons |
e1dfa45e | 1524 | |
    def runInWindow(self, cmd, title=None):
        """Compatibility shim: run *cmd* in a new window (delegates to
        run_in_window)."""
        return self.run_in_window(cmd, title)
3f950192 | 1527 | |
9711fc7e | 1528 | def startRouter(self, tgen=None): |
594b1259 | 1529 | # Disable integrated-vtysh-config |
787e7624 | 1530 | self.cmd( |
1531 | 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' | |
1532 | % self.routertype | |
1533 | ) | |
1534 | self.cmd( | |
1535 | "chown %s:%svty /etc/%s/vtysh.conf" | |
1536 | % (self.routertype, self.routertype, self.routertype) | |
1537 | ) | |
13e1fc49 | 1538 | # TODO remove the following lines after all tests are migrated to Topogen. |
594b1259 | 1539 | # Try to find relevant old logfiles in /tmp and delete them |
787e7624 | 1540 | map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))) |
594b1259 | 1541 | # Remove old core files |
787e7624 | 1542 | map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))) |
594b1259 MW |
1543 | # Remove IP addresses from OS first - we have them in zebra.conf |
1544 | self.removeIPs() | |
1545 | # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher | |
1546 | # No error - but return message and skip all the tests | |
787e7624 | 1547 | if self.daemons["ldpd"] == 1: |
1548 | ldpd_path = os.path.join(self.daemondir, "ldpd") | |
2ab85530 | 1549 | if not os.path.isfile(ldpd_path): |
222ea88b | 1550 | logger.info("LDP Test, but no ldpd compiled or installed") |
594b1259 | 1551 | return "LDP Test, but no ldpd compiled or installed" |
dd4eca4d | 1552 | |
787e7624 | 1553 | if version_cmp(platform.release(), "4.5") < 0: |
222ea88b | 1554 | logger.info("LDP Test need Linux Kernel 4.5 minimum") |
45619ee3 | 1555 | return "LDP Test need Linux Kernel 4.5 minimum" |
9711fc7e LB |
1556 | # Check if have mpls |
1557 | if tgen != None: | |
1558 | self.hasmpls = tgen.hasmpls | |
1559 | if self.hasmpls != True: | |
787e7624 | 1560 | logger.info( |
1561 | "LDP/MPLS Tests will be skipped, platform missing module(s)" | |
1562 | ) | |
9711fc7e LB |
1563 | else: |
1564 | # Test for MPLS Kernel modules available | |
1565 | self.hasmpls = False | |
787e7624 | 1566 | if not module_present("mpls-router"): |
1567 | logger.info( | |
1568 | "MPLS tests will not run (missing mpls-router kernel module)" | |
1569 | ) | |
1570 | elif not module_present("mpls-iptunnel"): | |
1571 | logger.info( | |
1572 | "MPLS tests will not run (missing mpls-iptunnel kernel module)" | |
1573 | ) | |
9711fc7e LB |
1574 | else: |
1575 | self.hasmpls = True | |
1576 | if self.hasmpls != True: | |
1577 | return "LDP/MPLS Tests need mpls kernel modules" | |
49581587 CH |
1578 | |
1579 | # Really want to use sysctl_atleast here, but only when MPLS is actually being | |
1580 | # used | |
787e7624 | 1581 | self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels") |
44a592b2 | 1582 | |
3f950192 CH |
1583 | shell_routers = g_extra_config["shell"] |
1584 | if "all" in shell_routers or self.name in shell_routers: | |
49581587 | 1585 | self.run_in_window(os.getenv("SHELL", "bash")) |
3f950192 CH |
1586 | |
1587 | vtysh_routers = g_extra_config["vtysh"] | |
1588 | if "all" in vtysh_routers or self.name in vtysh_routers: | |
49581587 | 1589 | self.run_in_window("vtysh") |
3f950192 | 1590 | |
787e7624 | 1591 | if self.daemons["eigrpd"] == 1: |
1592 | eigrpd_path = os.path.join(self.daemondir, "eigrpd") | |
44a592b2 | 1593 | if not os.path.isfile(eigrpd_path): |
222ea88b | 1594 | logger.info("EIGRP Test, but no eigrpd compiled or installed") |
44a592b2 MW |
1595 | return "EIGRP Test, but no eigrpd compiled or installed" |
1596 | ||
787e7624 | 1597 | if self.daemons["bfdd"] == 1: |
1598 | bfdd_path = os.path.join(self.daemondir, "bfdd") | |
4d45d6d3 RZ |
1599 | if not os.path.isfile(bfdd_path): |
1600 | logger.info("BFD Test, but no bfdd compiled or installed") | |
1601 | return "BFD Test, but no bfdd compiled or installed" | |
1602 | ||
0c449b01 | 1603 | return self.startRouterDaemons(tgen=tgen) |
aa5261bf | 1604 | |
aa5261bf RZ |
    def getStdErr(self, daemon):
        """Return the captured stderr of *daemon* on this router."""
        return self.getLog("err", daemon)
1607 | ||
    def getStdOut(self, daemon):
        """Return the captured stdout of *daemon* on this router."""
        return self.getLog("out", daemon)
1610 | ||
1611 | def getLog(self, log, daemon): | |
1612 | return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log)) | |
1613 | ||
0c449b01 | 1614 | def startRouterDaemons(self, daemons=None, tgen=None): |
49581587 | 1615 | "Starts FRR daemons for this router." |
e1dfa45e | 1616 | |
0ba1d257 | 1617 | asan_abort = g_extra_config["asan_abort"] |
0b25370e | 1618 | gdb_breakpoints = g_extra_config["gdb_breakpoints"] |
3f950192 CH |
1619 | gdb_daemons = g_extra_config["gdb_daemons"] |
1620 | gdb_routers = g_extra_config["gdb_routers"] | |
e58133a7 CH |
1621 | valgrind_extra = g_extra_config["valgrind_extra"] |
1622 | valgrind_memleaks = g_extra_config["valgrind_memleaks"] | |
0ba1d257 | 1623 | strace_daemons = g_extra_config["strace_daemons"] |
3f950192 | 1624 | |
49581587 CH |
1625 | # Get global bundle data |
1626 | if not self.path_exists("/etc/frr/support_bundle_commands.conf"): | |
1627 | # Copy global value if was covered by namespace mount | |
1628 | bundle_data = "" | |
1629 | if os.path.exists("/etc/frr/support_bundle_commands.conf"): | |
1630 | with open("/etc/frr/support_bundle_commands.conf", "r") as rf: | |
1631 | bundle_data = rf.read() | |
1632 | self.cmd_raises( | |
1633 | "cat > /etc/frr/support_bundle_commands.conf", | |
1634 | stdin=bundle_data, | |
701a0192 | 1635 | ) |
c39fe454 | 1636 | |
e1dfa45e LB |
1637 | # Starts actual daemons without init (ie restart) |
1638 | # cd to per node directory | |
49581587 CH |
1639 | self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name)) |
1640 | self.set_cwd("{}/{}".format(self.logdir, self.name)) | |
787e7624 | 1641 | self.cmd("umask 000") |
aa5261bf | 1642 | |
787e7624 | 1643 | # Re-enable to allow for report per run |
2a59a86b | 1644 | self.reportCores = True |
aa5261bf RZ |
1645 | |
1646 | # XXX: glue code forward ported from removed function. | |
fb80b81b | 1647 | if self.version == None: |
787e7624 | 1648 | self.version = self.cmd( |
c39fe454 | 1649 | os.path.join(self.daemondir, "bgpd") + " -v" |
787e7624 | 1650 | ).split()[2] |
1651 | logger.info("{}: running version: {}".format(self.name, self.version)) | |
aa5261bf RZ |
1652 | # If `daemons` was specified then some upper API called us with |
1653 | # specific daemons, otherwise just use our own configuration. | |
1654 | daemons_list = [] | |
3f950192 | 1655 | if daemons is not None: |
bb91e9c0 MS |
1656 | daemons_list = daemons |
1657 | else: | |
aa5261bf RZ |
1658 | # Append all daemons configured. |
1659 | for daemon in self.daemons: | |
1660 | if self.daemons[daemon] == 1: | |
1661 | daemons_list.append(daemon) | |
1662 | ||
3f950192 CH |
1663 | def start_daemon(daemon, extra_opts=None): |
1664 | daemon_opts = self.daemons_options.get(daemon, "") | |
1665 | rediropt = " > {0}.out 2> {0}.err".format(daemon) | |
1666 | if daemon == "snmpd": | |
1667 | binary = "/usr/sbin/snmpd" | |
1668 | cmdenv = "" | |
1669 | cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format( | |
1670 | daemon_opts | |
1671 | ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype) | |
1672 | else: | |
1673 | binary = os.path.join(self.daemondir, daemon) | |
e58133a7 | 1674 | |
0ba1d257 CH |
1675 | cmdenv = "ASAN_OPTIONS=" |
1676 | if asan_abort: | |
1677 | cmdenv = "abort_on_error=1:" | |
a53c08bc CH |
1678 | cmdenv += "log_path={0}/{1}.{2}.asan ".format( |
1679 | self.logdir, self.name, daemon | |
1680 | ) | |
0ba1d257 | 1681 | |
e58133a7 | 1682 | if valgrind_memleaks: |
a53c08bc CH |
1683 | this_dir = os.path.dirname( |
1684 | os.path.abspath(os.path.realpath(__file__)) | |
1685 | ) | |
1686 | supp_file = os.path.abspath( | |
1687 | os.path.join(this_dir, "../../../tools/valgrind.supp") | |
1688 | ) | |
1689 | cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format( | |
1690 | daemon, self.logdir, self.name, supp_file | |
1691 | ) | |
e58133a7 | 1692 | if valgrind_extra: |
a53c08bc CH |
1693 | cmdenv += ( |
1694 | "--gen-suppressions=all --expensive-definedness-checks=yes" | |
1695 | ) | |
0ba1d257 | 1696 | elif daemon in strace_daemons or "all" in strace_daemons: |
a53c08bc CH |
1697 | cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format( |
1698 | daemon, self.logdir, self.name | |
1699 | ) | |
0ba1d257 | 1700 | |
3f950192 CH |
1701 | cmdopt = "{} --log file:{}.log --log-level debug".format( |
1702 | daemon_opts, daemon | |
787e7624 | 1703 | ) |
3f950192 CH |
1704 | if extra_opts: |
1705 | cmdopt += " " + extra_opts | |
1706 | ||
1707 | if ( | |
1708 | (gdb_routers or gdb_daemons) | |
0b25370e DS |
1709 | and ( |
1710 | not gdb_routers or self.name in gdb_routers or "all" in gdb_routers | |
1711 | ) | |
1712 | and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons) | |
3f950192 CH |
1713 | ): |
1714 | if daemon == "snmpd": | |
1715 | cmdopt += " -f " | |
1716 | ||
1717 | cmdopt += rediropt | |
1718 | gdbcmd = "sudo -E gdb " + binary | |
1719 | if gdb_breakpoints: | |
1720 | gdbcmd += " -ex 'set breakpoint pending on'" | |
1721 | for bp in gdb_breakpoints: | |
1722 | gdbcmd += " -ex 'b {}'".format(bp) | |
1723 | gdbcmd += " -ex 'run {}'".format(cmdopt) | |
1724 | ||
49581587 CH |
1725 | self.run_in_window(gdbcmd, daemon) |
1726 | ||
a53c08bc CH |
1727 | logger.info( |
1728 | "%s: %s %s launched in gdb window", self, self.routertype, daemon | |
1729 | ) | |
3f950192 CH |
1730 | else: |
1731 | if daemon != "snmpd": | |
1732 | cmdopt += " -d " | |
1733 | cmdopt += rediropt | |
49581587 CH |
1734 | |
1735 | try: | |
1736 | self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False) | |
1737 | except subprocess.CalledProcessError as error: | |
1738 | self.logger.error( | |
1739 | '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:', | |
a53c08bc CH |
1740 | self, |
1741 | daemon, | |
1742 | error.returncode, | |
1743 | error.cmd, | |
1744 | '\n:stdout: "{}"'.format(error.stdout.strip()) | |
1745 | if error.stdout | |
1746 | else "", | |
1747 | '\n:stderr: "{}"'.format(error.stderr.strip()) | |
1748 | if error.stderr | |
1749 | else "", | |
49581587 CH |
1750 | ) |
1751 | else: | |
1752 | logger.info("%s: %s %s started", self, self.routertype, daemon) | |
3f950192 | 1753 | |
3f950192 CH |
1754 | # Start Zebra first |
1755 | if "zebra" in daemons_list: | |
1756 | start_daemon("zebra", "-s 90000000") | |
c39fe454 KK |
1757 | while "zebra" in daemons_list: |
1758 | daemons_list.remove("zebra") | |
aa5261bf | 1759 | |
a2a1134c | 1760 | # Start staticd next if required |
c39fe454 | 1761 | if "staticd" in daemons_list: |
3f950192 | 1762 | start_daemon("staticd") |
c39fe454 KK |
1763 | while "staticd" in daemons_list: |
1764 | daemons_list.remove("staticd") | |
aa5261bf | 1765 | |
92be50e6 | 1766 | if "snmpd" in daemons_list: |
49581587 CH |
1767 | # Give zerbra a chance to configure interface addresses that snmpd daemon |
1768 | # may then use. | |
1769 | time.sleep(2) | |
1770 | ||
3f950192 | 1771 | start_daemon("snmpd") |
92be50e6 BC |
1772 | while "snmpd" in daemons_list: |
1773 | daemons_list.remove("snmpd") | |
1774 | ||
49581587 CH |
1775 | if daemons is None: |
1776 | # Fix Link-Local Addresses on initial startup | |
1777 | # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this | |
1778 | _, output, _ = self.cmd_status( | |
1779 | "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done", | |
a53c08bc | 1780 | stderr=subprocess.STDOUT, |
49581587 CH |
1781 | ) |
1782 | logger.debug("Set MACs:\n%s", output) | |
aa5261bf | 1783 | |
594b1259 | 1784 | # Now start all the other daemons |
cb3e512d | 1785 | for daemon in daemons_list: |
aa5261bf | 1786 | if self.daemons[daemon] == 0: |
2ab85530 | 1787 | continue |
3f950192 | 1788 | start_daemon(daemon) |
787e7624 | 1789 | |
aa5261bf | 1790 | # Check if daemons are running. |
c39fe454 | 1791 | rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype) |
c65a7e26 KK |
1792 | if re.search(r"No such file or directory", rundaemons): |
1793 | return "Daemons are not running" | |
1794 | ||
49581587 CH |
1795 | # Update the permissions on the log files |
1796 | self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name)) | |
1797 | self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name)) | |
1798 | ||
c65a7e26 KK |
1799 | return "" |
1800 | ||
c39fe454 KK |
1801 | def killRouterDaemons( |
1802 | self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1" | |
1803 | ): | |
622c4996 | 1804 | # Kill Running FRR |
c65a7e26 | 1805 | # Daemons(user specified daemon only) using SIGKILL |
c39fe454 | 1806 | rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype) |
c65a7e26 KK |
1807 | errors = "" |
1808 | daemonsNotRunning = [] | |
1809 | if re.search(r"No such file or directory", rundaemons): | |
1810 | return errors | |
1811 | for daemon in daemons: | |
1812 | if rundaemons is not None and daemon in rundaemons: | |
1813 | numRunning = 0 | |
701a0192 | 1814 | dmns = rundaemons.split("\n") |
cd79342c MS |
1815 | # Exclude empty string at end of list |
1816 | for d in dmns[:-1]: | |
c65a7e26 | 1817 | if re.search(r"%s" % daemon, d): |
c39fe454 KK |
1818 | daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() |
1819 | if daemonpid.isdigit() and pid_exists(int(daemonpid)): | |
1820 | logger.info( | |
1821 | "{}: killing {}".format( | |
1822 | self.name, | |
1823 | os.path.basename(d.rstrip().rsplit(".", 1)[0]), | |
1824 | ) | |
1825 | ) | |
1826 | self.cmd("kill -9 %s" % daemonpid) | |
c65a7e26 KK |
1827 | if pid_exists(int(daemonpid)): |
1828 | numRunning += 1 | |
1829 | if wait and numRunning > 0: | |
c39fe454 KK |
1830 | sleep( |
1831 | 2, | |
1832 | "{}: waiting for {} daemon to be stopped".format( | |
1833 | self.name, daemon | |
1834 | ), | |
1835 | ) | |
cd79342c | 1836 | |
c65a7e26 | 1837 | # 2nd round of kill if daemons didn't exit |
cd79342c | 1838 | for d in dmns[:-1]: |
c65a7e26 | 1839 | if re.search(r"%s" % daemon, d): |
c39fe454 KK |
1840 | daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip() |
1841 | if daemonpid.isdigit() and pid_exists( | |
1842 | int(daemonpid) | |
1843 | ): | |
1844 | logger.info( | |
1845 | "{}: killing {}".format( | |
1846 | self.name, | |
1847 | os.path.basename( | |
1848 | d.rstrip().rsplit(".", 1)[0] | |
1849 | ), | |
1850 | ) | |
1851 | ) | |
1852 | self.cmd("kill -9 %s" % daemonpid) | |
c39fe454 | 1853 | self.cmd("rm -- {}".format(d.rstrip())) |
c65a7e26 KK |
1854 | if wait: |
1855 | errors = self.checkRouterCores(reportOnce=True) | |
c39fe454 KK |
1856 | if self.checkRouterVersion("<", minErrorVersion): |
1857 | # ignore errors in old versions | |
c65a7e26 KK |
1858 | errors = "" |
1859 | if assertOnError and len(errors) > 0: | |
1860 | assert "Errors found - details follow:" == 0, errors | |
c65a7e26 KK |
1861 | else: |
1862 | daemonsNotRunning.append(daemon) | |
1863 | if len(daemonsNotRunning) > 0: | |
c39fe454 | 1864 | errors = errors + "Daemons are not running", daemonsNotRunning |
c65a7e26 KK |
1865 | |
1866 | return errors | |
1867 | ||
2a59a86b LB |
1868 | def checkRouterCores(self, reportLeaks=True, reportOnce=False): |
1869 | if reportOnce and not self.reportCores: | |
1870 | return | |
1871 | reportMade = False | |
83c26937 | 1872 | traces = "" |
f76774ec | 1873 | for daemon in self.daemons: |
787e7624 | 1874 | if self.daemons[daemon] == 1: |
f76774ec | 1875 | # Look for core file |
787e7624 | 1876 | corefiles = glob.glob( |
1877 | "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon) | |
1878 | ) | |
1879 | if len(corefiles) > 0: | |
79f6fdeb | 1880 | backtrace = gdb_core(self, daemon, corefiles) |
787e7624 | 1881 | traces = ( |
1882 | traces | |
1883 | + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" | |
1884 | % (self.name, daemon, backtrace) | |
1885 | ) | |
2a59a86b | 1886 | reportMade = True |
f76774ec LB |
1887 | elif reportLeaks: |
1888 | log = self.getStdErr(daemon) | |
1889 | if "memstats" in log: | |
787e7624 | 1890 | sys.stderr.write( |
1891 | "%s: %s has memory leaks:\n" % (self.name, daemon) | |
1892 | ) | |
1893 | traces = traces + "\n%s: %s has memory leaks:\n" % ( | |
1894 | self.name, | |
1895 | daemon, | |
1896 | ) | |
f76774ec | 1897 | log = re.sub("core_handler: ", "", log) |
787e7624 | 1898 | log = re.sub( |
1899 | r"(showing active allocations in memory group [a-zA-Z0-9]+)", | |
1900 | r"\n ## \1", | |
1901 | log, | |
1902 | ) | |
f76774ec LB |
1903 | log = re.sub("memstats: ", " ", log) |
1904 | sys.stderr.write(log) | |
2a59a86b | 1905 | reportMade = True |
f76774ec | 1906 | # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found |
787e7624 | 1907 | if checkAddressSanitizerError( |
be2656ed | 1908 | self.getStdErr(daemon), self.name, daemon, self.logdir |
787e7624 | 1909 | ): |
1910 | sys.stderr.write( | |
1911 | "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon) | |
1912 | ) | |
1913 | traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % ( | |
1914 | self.name, | |
1915 | daemon, | |
1916 | ) | |
2a59a86b LB |
1917 | reportMade = True |
1918 | if reportMade: | |
1919 | self.reportCores = False | |
83c26937 | 1920 | return traces |
f76774ec | 1921 | |
    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo they don't run"

        # NOTE(review): declared but never assigned in this method -- appears
        # to be a leftover; confirm against other users of fatal_error.
        global fatal_error

        # Each running daemon answers "show logging" via vtysh; a daemon
        # missing from this output is considered not running.
        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            # snmpd is not an FRR daemon and does not show up in vtysh output.
            if daemon == "snmpd":
                continue
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    # Core found: dump a gdb backtrace (output handled inside).
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                # Stop at the first dead daemon and report it.
                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""
fb80b81b LB |
1983 | |
1984 | def checkRouterVersion(self, cmpop, version): | |
1985 | """ | |
1986 | Compares router version using operation `cmpop` with `version`. | |
1987 | Valid `cmpop` values: | |
1988 | * `>=`: has the same version or greater | |
1989 | * '>': has greater version | |
1990 | * '=': has the same version | |
1991 | * '<': has a lesser version | |
1992 | * '<=': has the same version or lesser | |
1993 | ||
1994 | Usage example: router.checkRouterVersion('>', '1.0') | |
1995 | """ | |
6bfe4b8b MW |
1996 | |
1997 | # Make sure we have version information first | |
1998 | if self.version == None: | |
787e7624 | 1999 | self.version = self.cmd( |
2000 | os.path.join(self.daemondir, "bgpd") + " -v" | |
2001 | ).split()[2] | |
2002 | logger.info("{}: running version: {}".format(self.name, self.version)) | |
6bfe4b8b | 2003 | |
fb80b81b | 2004 | rversion = self.version |
11761ab0 | 2005 | if rversion == None: |
fb80b81b LB |
2006 | return False |
2007 | ||
2008 | result = version_cmp(rversion, version) | |
787e7624 | 2009 | if cmpop == ">=": |
fb80b81b | 2010 | return result >= 0 |
787e7624 | 2011 | if cmpop == ">": |
fb80b81b | 2012 | return result > 0 |
787e7624 | 2013 | if cmpop == "=": |
fb80b81b | 2014 | return result == 0 |
787e7624 | 2015 | if cmpop == "<": |
fb80b81b | 2016 | return result < 0 |
787e7624 | 2017 | if cmpop == "<": |
fb80b81b | 2018 | return result < 0 |
787e7624 | 2019 | if cmpop == "<=": |
fb80b81b LB |
2020 | return result <= 0 |
2021 | ||
594b1259 MW |
2022 | def get_ipv6_linklocal(self): |
2023 | "Get LinkLocal Addresses from interfaces" | |
2024 | ||
2025 | linklocal = [] | |
2026 | ||
787e7624 | 2027 | ifaces = self.cmd("ip -6 address") |
594b1259 | 2028 | # Fix newlines (make them all the same) |
787e7624 | 2029 | ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines() |
2030 | interface = "" | |
2031 | ll_per_if_count = 0 | |
594b1259 | 2032 | for line in ifaces: |
fd03dacd | 2033 | m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line) |
594b1259 MW |
2034 | if m: |
2035 | interface = m.group(1) | |
2036 | ll_per_if_count = 0 | |
787e7624 | 2037 | m = re.search( |
2038 | "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link", | |
2039 | line, | |
2040 | ) | |
594b1259 MW |
2041 | if m: |
2042 | local = m.group(1) | |
2043 | ll_per_if_count += 1 | |
787e7624 | 2044 | if ll_per_if_count > 1: |
594b1259 MW |
2045 | linklocal += [["%s-%s" % (interface, ll_per_if_count), local]] |
2046 | else: | |
2047 | linklocal += [[interface, local]] | |
2048 | return linklocal | |
787e7624 | 2049 | |
80eeefb7 MW |
2050 | def daemon_available(self, daemon): |
2051 | "Check if specified daemon is installed (and for ldp if kernel supports MPLS)" | |
2052 | ||
2ab85530 RZ |
2053 | daemon_path = os.path.join(self.daemondir, daemon) |
2054 | if not os.path.isfile(daemon_path): | |
80eeefb7 | 2055 | return False |
787e7624 | 2056 | if daemon == "ldpd": |
2057 | if version_cmp(platform.release(), "4.5") < 0: | |
b431b554 | 2058 | return False |
787e7624 | 2059 | if not module_present("mpls-router", load=False): |
80eeefb7 | 2060 | return False |
787e7624 | 2061 | if not module_present("mpls-iptunnel", load=False): |
b431b554 | 2062 | return False |
80eeefb7 | 2063 | return True |
f2d6ce41 | 2064 | |
    def get_routertype(self):
        "Return the type of Router (frr)"

        # Simple accessor; routertype is assigned elsewhere in the class.
        return self.routertype
787e7624 | 2069 | |
50c40bde MW |
    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        # The report file is opened lazily on the first leak found and
        # closed once at the end; subsequent leaking daemons append to it.
        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                # A "memstats" marker in the daemon's stderr indicates leaks.
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                        leakfile.write("## Router %s\n" % self.name)
                    # One markdown section per leaking daemon.
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats: ", " ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
80eeefb7 | 2107 | |
787e7624 | 2108 | |
def frr_unicode(s):
    """Convert string to unicode, depending on python version"""
    if sys.version_info[0] <= 2:
        # Python 2: promote to the unicode type.
        return unicode(s)  # pylint: disable=E0602
    # Python 3: str is already unicode.
    return s
c8e5983d CH |
2115 | |
2116 | ||
def is_mapping(o):
    """Return True if *o* is a mapping (dict-like) per the Mapping ABC."""
    return isinstance(o, Mapping)