#!/usr/bin/env python

#
# topotest.py
# Library of helper functions for NetDEF Topology Tests
#
# Copyright (c) 2016 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#

import json
import os
import errno
import re
import sys
import glob
import StringIO
import subprocess
import tempfile
import platform
import difflib
import time

from lib.topolog import logger

from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node, OVSSwitch, Host
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.link import Intf

class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        for line in error.splitlines():
            self.errors.append(line)

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return len(self.errors) > 0

def get_test_logdir(node=None, init=False):
    """
    Return the current test log directory based on the PYTEST_CURRENT_TEST
    environment variable.
    Optional parameters:
     node: when set, adds the node specific log directory to the init dir
     init: when set, initializes the log directory and fixes path permissions
    """
    cur_test = os.environ['PYTEST_CURRENT_TEST']

    ret = '/tmp/topotests/' + cur_test[0:cur_test.find(".py")].replace('/', '.')
    if node is not None:
        dir = ret + "/" + node
        if init:
            os.system('mkdir -p ' + dir)
            os.system('chmod 775 ' + dir)
    return ret

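# Illustrative sketch (hypothetical test name, not executed here): with
# PYTEST_CURRENT_TEST set to "ospf-topo1/test_ospf_topo1.py::test_ospf (call)",
# get_test_logdir(node='r1', init=True) creates
# /tmp/topotests/ospf-topo1.test_ospf_topo1/r1 and returns the test directory
# /tmp/topotests/ospf-topo1.test_ospf_topo1.
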
def json_diff(d1, d2):
    """
    Returns a string with the difference between JSON data.
    """
    json_format_opts = {
        'indent': 4,
        'sort_keys': True,
    }
    dstr1 = json.dumps(d1, **json_format_opts)
    dstr2 = json.dumps(d2, **json_format_opts)
    return difflines(dstr2, dstr1, title1='Expected value', title2='Current value', n=0)

def json_cmp(d1, d2):
    """
    JSON compare function. Receives two parameters:
    * `d1`: JSON value to check
    * `d2`: JSON subset that we expect `d1` to contain

    Returns `None` when every key/value expected by `d2` is present in `d1`,
    otherwise a `json_cmp_result` describing what failed.

    Note: key absence can be tested by adding a key with value `None`.
    """
    squeue = [(d1, d2, 'json')]
    result = json_cmp_result()
    for s in squeue:
        nd1, nd2, parent = s
        s1, s2 = set(nd1), set(nd2)

        # Expect all required fields to exist.
        s2_req = set([key for key in nd2 if nd2[key] is not None])
        diff = s2_req - s1
        if diff != set({}):
            result.add_error('expected key(s) {} in {} (have {}):\n{}'.format(
                str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2)))

        for key in s2.intersection(s1):
            # Test for non existence of key in d2
            if nd2[key] is None:
                result.add_error('"{}" should not exist in {} (have {}):\n{}'.format(
                    key, parent, str(s1), json_diff(nd1[key], nd2[key])))
                continue
            # If nd1 key is a dict, we have to recurse in it later.
            if isinstance(nd2[key], type({})):
                if not isinstance(nd1[key], type({})):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {}):\n{}'.format(
                            type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
                    continue
                nparent = '{}["{}"]'.format(parent, key)
                squeue.append((nd1[key], nd2[key], nparent))
                continue
            # Check list items
            if isinstance(nd2[key], type([])):
                if not isinstance(nd1[key], type([])):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {}):\n{}'.format(
                            type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
                    continue
                # Check list size
                if len(nd2[key]) > len(nd1[key]):
                    result.add_error(
                        '{}["{}"] too few items '.format(parent, key) +
                        '(have {}, expected {}:\n {})'.format(
                            len(nd1[key]), len(nd2[key]),
                            json_diff(nd1[key], nd2[key])))
                    continue

                # List all unmatched items errors
                unmatched = []
                for expected in nd2[key]:
                    matched = False
                    for value in nd1[key]:
                        if json_cmp({'json': value}, {'json': expected}) is None:
                            matched = True
                            break

                    if matched:
                        break
                    if not matched:
                        unmatched.append(expected)

                # If there are unmatched items, error out.
                if unmatched:
                    result.add_error(
                        '{}["{}"] value is different (\n{})'.format(
                            parent, key, json_diff(nd1[key], nd2[key])))
                continue

            # Compare JSON values
            if nd1[key] != nd2[key]:
                result.add_error(
                    '{}["{}"] value is different (\n{})'.format(
                        parent, key, json_diff(nd1[key], nd2[key])))
                continue

    if result.has_errors():
        return result

    return None

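# Usage sketch (hypothetical data): `d2` is the expected subset, so extra keys
# in `d1` are ignored and a `None` value asserts key absence, e.g.
#   json_cmp({'name': 'r1', 'asn': 100}, {'asn': 100})    -> None (match)
#   json_cmp({'name': 'r1'}, {'asn': 100})                 -> result with a missing-key error
#   json_cmp({'name': 'r1', 'asn': 100}, {'name': None})   -> result with a "should not exist" error
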
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Try up to `count` times,
    waiting `wait` seconds between tries. By default it tries 20 times with
    a 3 second delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    while count > 0:
        result = func()
        if result != what:
            time.sleep(wait)
            count -= 1
            continue
        return (True, result)
    return (False, result)


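# Usage sketch (hypothetical router and expected data): poll a command until
# its JSON output matches, pairing run_and_expect() with json_cmp():
#   func = lambda: json_cmp(json.loads(router.cmd('vtysh -c "show ip route json"')), expected)
#   success, result = run_and_expect(func, None, count=40, wait=2)
# The comparison target is None because json_cmp() returns None on a match.
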
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        dpid = hex(dpid)[2:]
        dpid = '0' * (16 - len(dpid)) + dpid
        return dpid
    except IndexError:
        raise Exception('Unable to derive default datapath ID - '
                        'please either specify a dpid or use a '
                        'canonical switch name such as s23.')

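# Example (illustrative): int2dpid(23) returns '0000000000000017', i.e. the
# integer rendered as hex and left-padded with zeros to 16 characters.
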
def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True

def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    diff = '\n'.join(difflib.unified_diff(text1, text2,
                     fromfile=title1, tofile=title2, **opts))
    # Clean up line endings
    diff = os.linesep.join([s for s in diff.splitlines() if s])
    return diff

def difflines(text1, text2, title1='', title2='', **opts):
    "Wrapper for get_textdiff to avoid string transformations."
    text1 = ('\n'.join(text1.rstrip().splitlines()) + '\n').splitlines(1)
    text2 = ('\n'.join(text2.rstrip().splitlines()) + '\n').splitlines(1)
    return get_textdiff(text1, text2, title1, title2, **opts)

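# Sketch (hypothetical strings): difflines('a\nb', 'a\nc', title1='expected',
# title2='actual') yields a unified diff labelled with the two titles, while
# identical inputs yield an empty string.
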
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    fde = tempfile.NamedTemporaryFile(mode='w', delete=False)
    fname = fde.name
    fde.write(content)
    fde.close()
    return fname

def normalize_text(text):
    """
    Collapses runs of formatting spaces/tabs and strips carriage returns.
    """
    text = re.sub(r'[ \t]+', ' ', text)
    text = re.sub(r'\r', '', text)
    return text

def version_cmp(v1, v2):
    """
    Compares two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formatted.
    """
    vregex = r'(?P<whole>\d+(\.(\d+))*)'
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got an invalid version string")

    # Split values
    v1g = v1m.group('whole').split('.')
    v2g = v2m.group('whole').split('.')

    # Get the longest version string
    vnum = len(v1g)
    if len(v2g) > vnum:
        vnum = len(v2g)

    # Reverse list because we are going to pop the tail
    v1g.reverse()
    v2g.reverse()
    for _ in range(vnum):
        try:
            v1n = int(v1g.pop())
        except IndexError:
            while v2g:
                v2n = int(v2g.pop())
                if v2n > 0:
                    return -1
            break

        try:
            v2n = int(v2g.pop())
        except IndexError:
            if v1n > 0:
                return 1
            while v1g:
                v1n = int(v1g.pop())
                if v1n > 0:
                    return 1
            break

        if v1n > v2n:
            return 1
        if v1n < v2n:
            return -1
    return 0

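# Examples (illustrative): version_cmp('4.9', '4.5') == 1,
# version_cmp('3.0', '3.0.2') == -1 and version_cmp('2.6', '2.6') == 0.
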
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == 'dev':
                route['dev'] = column
            if prev == 'via':
                route['via'] = column
            if prev == 'proto':
                route['proto'] = column
            if prev == 'metric':
                route['metric'] = column
            if prev == 'scope':
                route['scope'] = column
            prev = column

    return result

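# Usage sketch (hypothetical prefix/device): combined with json_cmp() this
# allows asserting on kernel routes, e.g.
#   expected = {'10.0.1.0/24': {'dev': 'r1-eth0', 'proto': '188'}}
#   assert json_cmp(ip4_route(router), expected) is None
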
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip -6 route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == 'dev':
                route['dev'] = column
            if prev == 'via':
                route['via'] = column
            if prev == 'proto':
                route['proto'] = column
            if prev == 'metric':
                route['metric'] = column
            if prev == 'pref':
                route['pref'] = column
            prev = column

    return result

def sleep(amount, reason=None):
    """
    Sleep wrapper that logs the amount of sleep (and an optional reason).
    """
    if reason is None:
        logger.info('Sleeping for {} seconds'.format(amount))
    else:
        logger.info(reason + ' ({} seconds)'.format(amount))

    time.sleep(amount)

def checkAddressSanitizerError(output, router, component):
    "Checks for an AddressSanitizer error in output. If found, logs it and returns True, otherwise False."

    addressSantizerError = re.search('(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ', output)
    if addressSantizerError:
        sys.stderr.write("%s: %s triggered an exception by AddressSanitizer\n" % (router, component))
        # Sanitizer Error found in log
        pidMark = addressSantizerError.group(1)
        addressSantizerLog = re.search('%s(.*)%s' % (pidMark, pidMark), output, re.DOTALL)
        if addressSantizerLog:
            callingTest = os.path.basename(sys._current_frames().values()[0].f_back.f_back.f_globals['__file__'])
            callingProc = sys._getframe(2).f_code.co_name
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write('\n'.join(addressSantizerLog.group(1).splitlines()) + '\n')
                addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2))
                addrSanFile.write("### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" % (callingTest, callingProc, router))
                addrSanFile.write(' ' + '\n '.join(addressSantizerLog.group(1).splitlines()) + '\n')
                addrSanFile.write("\n---------------\n")
        return True
    return False

def addRouter(topo, name):
    "Add an FRR (or Quagga) router to the topology"

    MyPrivateDirs = ['/etc/frr',
                     '/etc/quagga',
                     '/var/run/frr',
                     '/var/run/quagga',
                     '/var/log']
    return topo.addNode(name, cls=Router, privateDirs=MyPrivateDirs)

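# Usage sketch (assuming a mininet Topo subclass): inside its build() method,
#   r1 = addRouter(self, 'r1')
# registers a Router node whose configuration, PID and log directories are
# private to that node.
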
def set_sysctl(node, sysctl, value):
    "Set a sysctl value and return None on success or an error string"
    valuestr = '{}'.format(value)
    command = "sysctl {0}={1}".format(sysctl, valuestr)
    cmdret = node.cmd(command)

    matches = re.search(r'([^ ]+) = ([^\s]+)', cmdret)
    if matches is None:
        return cmdret
    if matches.group(1) != sysctl:
        return cmdret
    if matches.group(2) != valuestr:
        return cmdret

    return None

def assert_sysctl(node, sysctl, value):
    "Set and assert that the sysctl is set with the specified value."
    assert set_sysctl(node, sysctl, value) is None

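# Sketch (illustrative): set_sysctl(node, 'net.ipv4.ip_forward', 1) runs
# "sysctl net.ipv4.ip_forward=1" on the node and returns None only when the
# kernel echoes back "net.ipv4.ip_forward = 1"; any other output is returned
# verbatim so the caller can inspect it.
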
class LinuxRouter(Node):
    "A Node with IPv4/IPv6 forwarding enabled."

    def config(self, **params):
        super(LinuxRouter, self).config(**params)
        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)

    def terminate(self):
        """
        Terminate generic LinuxRouter Mininet instance
        """
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(LinuxRouter, self).terminate()

class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled and FRR or Quagga as the routing engine"

    def __init__(self, name, **params):
        super(Router, self).__init__(name, **params)
        self.logdir = params.get('logdir', get_test_logdir(name, True))
        self.daemondir = None
        self.hasmpls = False
        self.routertype = 'frr'
        self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0,
                        'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0,
                        'ldpd': 0, 'eigrpd': 0, 'nhrpd': 0}
        self.daemons_options = {'zebra': ''}
        self.reportCores = True

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get('frrdir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/frr'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    def _config_quagga(self, **params):
        "Configure Quagga binaries"
        self.daemondir = params.get('quaggadir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/quagga'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get('daemondir')
        if self.daemondir is None:
            self.routertype = params.get('routertype', 'frr')
            if self.routertype == 'quagga':
                self._config_quagga(**params)
            else:
                self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, 'zebra')
            if not os.path.isfile(zpath):
                raise Exception('No zebra binary found in {}'.format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get('routertype') is not None:
                self.routertype = self.params.get('routertype')

        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)
        # Enable coredumps
        assert_sysctl(self, 'kernel.core_uses_pid', 1)
        assert_sysctl(self, 'fs.suid_dumpable', 1)
        # This applies to the kernel, not the namespace...
        # Original on Ubuntu 17.x, but apport won't save as in namespace:
        # |/usr/share/apport/apport %p %s %c %d %P
        corefile = '%e_core-sig_%s-pid_%p.dmp'
        assert_sysctl(self, 'kernel.core_pattern', corefile)
        self.cmd('ulimit -c unlimited')
        # Set ownership of config files
        self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype))

    def terminate(self):
        # Delete Running Quagga or FRR Daemons
        self.stopRouter()
        # rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        # for d in StringIO.StringIO(rundaemons):
        #     self.cmd('kill -7 `cat %s`' % d.rstrip())
        #     self.waitOutput()
        # Disable forwarding
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(Router, self).terminate()

    def stopRouter(self, wait=True):
        # Stop Running Quagga or FRR Daemons
        rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return
        if rundaemons is not None:
            numRunning = 0
            for d in StringIO.StringIO(rundaemons):
                daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
                if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
                    logger.info('{}: stopping {}'.format(
                        self.name,
                        os.path.basename(d.rstrip().rsplit(".", 1)[0])
                    ))
                    self.cmd('kill -TERM %s' % daemonpid)
                    self.waitOutput()
                    if pid_exists(int(daemonpid)):
                        numRunning += 1
            if wait and numRunning > 0:
                sleep(2, '{}: waiting for daemons stopping'.format(self.name))
                # 2nd round of kill if daemons didn't exit
                for d in StringIO.StringIO(rundaemons):
                    daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
                    if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
                        logger.info('{}: killing {}'.format(
                            self.name,
                            os.path.basename(d.rstrip().rsplit(".", 1)[0])
                        ))
                        self.cmd('kill -7 %s' % daemonpid)
                        self.waitOutput()
                    self.cmd('rm -- {}'.format(d.rstrip()))
        if wait:
            self.checkRouterCores(reportOnce=True)

    def removeIPs(self):
        for interface in self.intfNames():
            self.cmd('ip address flush', interface)

    def checkCapability(self, daemon, param):
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace('-', '')
            output = self.cmd('{0} -h | grep {1}'.format(
                daemon_path, daemon_search_option))
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys():
            self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            if source is None:
                self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon))
                self.waitOutput()
            else:
                self.cmd('cp %s /etc/%s/%s.conf' % (source, self.routertype, daemon))
                self.waitOutput()
            self.cmd('chmod 640 /etc/%s/%s.conf' % (self.routertype, daemon))
            self.waitOutput()
            self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon))
            self.waitOutput()
        else:
            logger.info('No daemon {} known'.format(daemon))
        # print "Daemons after:", self.daemons

    def startRouter(self, tgen=None):
        # Disable integrated-vtysh-config
        self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype)
        self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype))
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check that ldpd is compiled and the Linux kernel is 4.5 or higher.
        # No error - but return a message and skip all the tests.
        if self.daemons['ldpd'] == 1:
            ldpd_path = os.path.join(self.daemondir, 'ldpd')
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), '4.5') < 0:
                logger.info("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"
            # Check if have mpls
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info("LDP/MPLS Tests will be skipped, platform missing module(s)")
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if os.system('/sbin/modprobe mpls-router') != 0:
                    logger.info('MPLS tests will not run (missing mpls-router kernel module)')
                elif os.system('/sbin/modprobe mpls-iptunnel') != 0:
                    logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)')
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"
            self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels')

        if self.daemons['eigrpd'] == 1:
            eigrpd_path = os.path.join(self.daemondir, 'eigrpd')
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        self.restartRouter()
        return ""

    def restartRouter(self):
        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd('cd {}/{}'.format(self.logdir, self.name))
        # Re-enable to allow for report per run
        self.reportCores = True
        # Start Zebra first
        if self.daemons['zebra'] == 1:
            zebra_path = os.path.join(self.daemondir, 'zebra')
            zebra_option = self.daemons_options['zebra']
            self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format(
                zebra_path, zebra_option, self.logdir, self.name
            ))
            self.waitOutput()
            logger.debug('{}: {} zebra started'.format(self, self.routertype))
            sleep(1, '{}: waiting for zebra to start'.format(self.name))
        # Fix Link-Local Addresses
        # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
        self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
        # Now start all the other daemons
        for daemon in self.daemons:
            # Skip disabled daemons and zebra
            if self.daemons[daemon] == 0 or daemon == 'zebra':
                continue

            daemon_path = os.path.join(self.daemondir, daemon)
            self.cmd('{0} > {3}.out 2> {3}.err &'.format(
                daemon_path, self.logdir, self.name, daemon
            ))
            self.waitOutput()
            logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))

    def getStdErr(self, daemon):
        return self.getLog('err', daemon)

    def getStdOut(self, daemon):
        return self.getLog('out', daemon)

    def getLog(self, log, daemon):
        return self.cmd('cat {}/{}/{}.{}'.format(self.logdir, self.name, daemon, log))

    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                # Look for core file
                corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s" % backtrace)
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write("%s: %s has memory leaks:\n" % (self.name, daemon))
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n ## \1", log)
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    sys.stderr.write("%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon))
                    reportMade = True
        if reportMade:
            self.reportCores = False

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo if they don't run"

        global fatal_error

        daemonsRunning = self.cmd('vtysh -c "show log" | grep "Logging configuration for"')
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                # Look for core file
                corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s\n" % backtrace)
                else:
                    # No core found - If we find a matching logfile in /tmp, then print the last 20 lines from it.
                    if os.path.isfile('{}/{}/{}.log'.format(self.logdir, self.name, daemon)):
                        log_tail = subprocess.check_output([
                            "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                self.logdir, self.name, daemon)
                        ], shell=True)
                        sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon))
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (self.name, daemon)

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""

    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"

        linklocal = []

        ifaces = self.cmd('ip -6 address')
        # Fix newlines (make them all the same)
        ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            m = re.search('[0-9]+: ([^:@]+)[@if0-9:]+ <', line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link', line)
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                if (ll_per_if_count > 1):
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal
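
    # Return sketch (hypothetical addresses): get_ipv6_linklocal() yields pairs
    # such as [['r1-eth0', 'fe80::a8bb:ccff:fe00:100']]; a second link-local on
    # the same interface is reported with a numeric suffix, e.g. 'r1-eth0-2'.
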
    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if (daemon == 'ldpd'):
            if version_cmp(platform.release(), '4.5') < 0:
                return False
            if self.cmd('/sbin/modprobe -n mpls-router') != "":
                return False
            if self.cmd('/sbin/modprobe -n mpls-iptunnel') != "":
                return False

        return True

    def get_routertype(self):
        "Return the type of Router (frr or quagga)"

        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info('\nRouter {} {} StdErr Log:\n{}'.format(
                        self.name, daemon, log))
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write("# Memory Leak Detection for topotest %s\n\n" % testscript)
                        leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n#### \1\n", log)
                    log = re.sub("memstats: ", " ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()


class LegacySwitch(OVSSwitch):
    "A Legacy Switch without OpenFlow"

    def __init__(self, name, **params):
        OVSSwitch.__init__(self, name, failMode='standalone', **params)
        self.switchIP = None