#!/usr/bin/env python

#
# topotest.py
# Library of helper functions for NetDEF Topology Tests
#
# Copyright (c) 2016 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#

import json
import os
import errno
import re
import sys
import glob
import StringIO
import subprocess
import tempfile
import platform
import difflib
import time

from lib.topolog import logger

from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node, OVSSwitch, Host
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.link import Intf

class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        for line in error.splitlines():
            self.errors.append(line)

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return len(self.errors) > 0

def get_test_logdir(node=None, init=False):
    """
    Return the current test log directory based on the PYTEST_CURRENT_TEST
    environment variable.
    Optional parameters:
    node: when set, adds the node-specific log directory to the init dir
    init: when set, initializes the log directory and fixes path permissions
    """
    cur_test = os.environ['PYTEST_CURRENT_TEST']

    ret = '/tmp/topotests/' + cur_test[0:cur_test.find(".py")].replace('/', '.')
    if node is not None:
        dir = ret + "/" + node
    if init:
        os.system('mkdir -p ' + dir)
        os.system('chmod -R go+rw /tmp/topotests')
    return ret

def json_diff(d1, d2):
    """
    Returns a string with the difference between JSON data.
    """
    json_format_opts = {
        'indent': 4,
        'sort_keys': True,
    }
    dstr1 = json.dumps(d1, **json_format_opts)
    dstr2 = json.dumps(d2, **json_format_opts)
    return difflines(dstr2, dstr1, title1='Expected value', title2='Current value', n=0)

def json_cmp(d1, d2):
    """
    JSON compare function. Receives two parameters:
    * `d1`: json value
    * `d2`: json subset which we expect

    Returns `None` when every key that `d2` expects is present in `d1` and
    matches, otherwise a `json_cmp_result` describing what failed.

    Note: key absence can be tested by adding a key with value `None`.
    """
    squeue = [(d1, d2, 'json')]
    result = json_cmp_result()
    for s in squeue:
        nd1, nd2, parent = s
        s1, s2 = set(nd1), set(nd2)

        # Expect all required fields to exist.
        s2_req = set([key for key in nd2 if nd2[key] is not None])
        diff = s2_req - s1
        if diff != set({}):
            result.add_error('expected key(s) {} in {} (have {}):\n{}'.format(
                str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2)))

        for key in s2.intersection(s1):
            # Test for non existence of key in d2
            if nd2[key] is None:
                result.add_error('"{}" should not exist in {} (have {}):\n{}'.format(
                    key, parent, str(s1), json_diff(nd1[key], nd2[key])))
                continue
            # If nd1 key is a dict, we have to recurse in it later.
            if isinstance(nd2[key], type({})):
                if not isinstance(nd1[key], type({})):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {}):\n{}'.format(
                            type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
                    continue
                nparent = '{}["{}"]'.format(parent, key)
                squeue.append((nd1[key], nd2[key], nparent))
                continue
            # Check list items
            if isinstance(nd2[key], type([])):
                if not isinstance(nd1[key], type([])):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {}):\n{}'.format(
                            type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
                    continue
                # Check list size
                if len(nd2[key]) > len(nd1[key]):
                    result.add_error(
                        '{}["{}"] too few items '.format(parent, key) +
                        '(have {}, expected {}:\n {})'.format(
                            len(nd1[key]), len(nd2[key]),
                            json_diff(nd1[key], nd2[key])))
                    continue

                # List all unmatched items errors
                unmatched = []
                for expected in nd2[key]:
                    matched = False
                    for value in nd1[key]:
                        if json_cmp({'json': value}, {'json': expected}) is None:
                            matched = True
                            break

                    if not matched:
                        unmatched.append(expected)

                # If there are unmatched items, error out.
                if unmatched:
                    result.add_error(
                        '{}["{}"] value is different (\n{})'.format(
                            parent, key, json_diff(nd1[key], nd2[key])))
                continue

            # Compare JSON values
            if nd1[key] != nd2[key]:
                result.add_error(
                    '{}["{}"] value is different (\n{})'.format(
                        parent, key, json_diff(nd1[key], nd2[key])))
                continue

    if result.has_errors():
        return result

    return None

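# A minimal usage sketch for json_cmp() (illustrative only; the router name,
# vtysh command and expected values below are hypothetical, not part of this
# library):
#
#     expected = {
#         'routerId': '10.0.255.1',
#         'staleKey': None,   # value None asserts the key is absent
#     }
#     actual = json.loads(r1.cmd('vtysh -c "show ip bgp summary json"'))
#     assert json_cmp(actual, expected) is None, 'unexpected BGP summary on r1'
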
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Retry up to `count` times,
    waiting `wait` seconds between tries. By default it tries 20 times with
    a 3 second delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    while count > 0:
        result = func()
        if result != what:
            time.sleep(wait)
            count -= 1
            continue
        return (True, result)
    return (False, result)

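# A usage sketch for run_and_expect() (hypothetical test code): poll a router
# until a condition holds instead of relying on a single fixed sleep.
#
#     def _bgp_converged():
#         output = json.loads(r1.cmd('vtysh -c "show ip bgp summary json"'))
#         return json_cmp(output, {'peerCount': 2})
#
#     ok, result = run_and_expect(_bgp_converged, None, count=40, wait=2)
#     assert ok, 'BGP did not converge on r1'
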
def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        dpid = hex(dpid)[2:]
        dpid = '0' * (16 - len(dpid)) + dpid
        return dpid
    except IndexError:
        raise Exception('Unable to derive default datapath ID - '
                        'please either specify a dpid or use a '
                        'canonical switch name such as s23.')

def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True

def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    diff = '\n'.join(difflib.unified_diff(text1, text2,
                     fromfile=title1, tofile=title2, **opts))
    # Clean up line endings
    diff = os.linesep.join([s for s in diff.splitlines() if s])
    return diff

def difflines(text1, text2, title1='', title2='', **opts):
    "Wrapper for get_textdiff to avoid string transformations."
    text1 = ('\n'.join(text1.rstrip().splitlines()) + '\n').splitlines(1)
    text2 = ('\n'.join(text2.rstrip().splitlines()) + '\n').splitlines(1)
    return get_textdiff(text1, text2, title1, title2, **opts)

def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    fde = tempfile.NamedTemporaryFile(mode='w', delete=False)
    fname = fde.name
    fde.write(content)
    fde.close()
    return fname

def normalize_text(text):
    """
    Strips formatting spaces/tabs and carriage returns.
    """
    text = re.sub(r'[ \t]+', ' ', text)
    text = re.sub(r'\r', '', text)
    return text

def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formatted.
    """
    vregex = r'(?P<whole>\d+(\.(\d+))*)'
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got an invalid version string")

    # Split values
    v1g = v1m.group('whole').split('.')
    v2g = v2m.group('whole').split('.')

    # Get the longest version string
    vnum = len(v1g)
    if len(v2g) > vnum:
        vnum = len(v2g)

    # Reverse list because we are going to pop the tail
    v1g.reverse()
    v2g.reverse()
    for _ in range(vnum):
        try:
            v1n = int(v1g.pop())
        except IndexError:
            while v2g:
                v2n = int(v2g.pop())
                if v2n > 0:
                    return -1
            break

        try:
            v2n = int(v2g.pop())
        except IndexError:
            if v1n > 0:
                return 1
            while v1g:
                v1n = int(v1g.pop())
                if v1n > 0:
                    return 1
            break

        if v1n > v2n:
            return 1
        if v1n < v2n:
            return -1
    return 0

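# version_cmp() examples (results follow directly from the comparison above):
#
#     version_cmp('4.10', '4.5')   ->  1   (4.10 is newer than 4.5)
#     version_cmp('4.5.0', '4.5')  ->  0
#     version_cmp('3.16', '4.5')   -> -1
#
# It is used further below to require a minimum kernel, e.g.
# version_cmp(platform.release(), '4.5') < 0 means the running kernel is too
# old for the MPLS/LDP tests.
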
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == 'dev':
                route['dev'] = column
            if prev == 'via':
                route['via'] = column
            if prev == 'proto':
                route['proto'] = column
            if prev == 'metric':
                route['metric'] = column
            if prev == 'scope':
                route['scope'] = column
            prev = column

    return result

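# Sketch of how ip4_route() combines with json_cmp() in a test (router and
# interface names are hypothetical):
#
#     routes = ip4_route(router)
#     expected = {'10.0.2.0/24': {'dev': 'r1-eth1', 'proto': 'kernel'}}
#     assert json_cmp(routes, expected) is None, 'route 10.0.2.0/24 missing or wrong'
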
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip -6 route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == 'dev':
                route['dev'] = column
            if prev == 'via':
                route['via'] = column
            if prev == 'proto':
                route['proto'] = column
            if prev == 'metric':
                route['metric'] = column
            if prev == 'pref':
                route['pref'] = column
            prev = column

    return result

def sleep(amount, reason=None):
    """
    Sleep wrapper that logs how long it sleeps and why.
    """
    if reason is None:
        logger.info('Sleeping for {} seconds'.format(amount))
    else:
        logger.info(reason + ' ({} seconds)'.format(amount))

    time.sleep(amount)

def checkAddressSanitizerError(output, router, component):
    "Checks for an AddressSanitizer error in output. If found, logs it and returns True, otherwise False"

    addressSantizerError = re.search('(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ', output)
    if addressSantizerError:
        sys.stderr.write("%s: %s triggered an exception by AddressSanitizer\n" % (router, component))
        # Sanitizer Error found in log
        pidMark = addressSantizerError.group(1)
        addressSantizerLog = re.search('%s(.*)%s' % (pidMark, pidMark), output, re.DOTALL)
        if addressSantizerLog:
            callingTest = os.path.basename(sys._current_frames().values()[0].f_back.f_back.f_globals['__file__'])
            callingProc = sys._getframe(2).f_code.co_name
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write('\n'.join(addressSantizerLog.group(1).splitlines()) + '\n')
                addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2))
                addrSanFile.write("### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" % (callingTest, callingProc, router))
                addrSanFile.write('    ' + '\n    '.join(addressSantizerLog.group(1).splitlines()) + '\n')
                addrSanFile.write("\n---------------\n")
        return True
    return False

def addRouter(topo, name):
    "Adding a FRRouter (or Quagga) to Topology"

    MyPrivateDirs = ['/etc/frr',
                     '/etc/quagga',
                     '/var/run/frr',
                     '/var/run/quagga',
                     '/var/log']
    return topo.addNode(name, cls=Router, privateDirs=MyPrivateDirs)

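# Sketch of a topology class using addRouter() (names are hypothetical; actual
# tests define similar Topo subclasses and then start the routers through the
# Router methods below):
#
#     class SimpleTopo(Topo):
#         def build(self, **_opts):
#             r1 = addRouter(self, 'r1')
#             r2 = addRouter(self, 'r2')
#             sw = self.addSwitch('sw1', cls=LegacySwitch)
#             self.addLink(r1, sw)
#             self.addLink(r2, sw)
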
def set_sysctl(node, sysctl, value):
    "Set a sysctl value and return None on success or an error string"
    valuestr = '{}'.format(value)
    command = "sysctl {0}={1}".format(sysctl, valuestr)
    cmdret = node.cmd(command)

    matches = re.search(r'([^ ]+) = ([^\s]+)', cmdret)
    if matches is None:
        return cmdret
    if matches.group(1) != sysctl:
        return cmdret
    if matches.group(2) != valuestr:
        return cmdret

    return None

def assert_sysctl(node, sysctl, value):
    "Set and assert that the sysctl is set with the specified value."
    assert set_sysctl(node, sysctl, value) is None

class LinuxRouter(Node):
    "A Node with IPv4/IPv6 forwarding enabled."

    def config(self, **params):
        super(LinuxRouter, self).config(**params)
        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)

    def terminate(self):
        """
        Terminate generic LinuxRouter Mininet instance
        """
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(LinuxRouter, self).terminate()

class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled and Quagga as Routing Engine"

    def __init__(self, name, **params):
        super(Router, self).__init__(name, **params)
        self.logdir = params.get('logdir', get_test_logdir(name, True))
        self.daemondir = None
        self.hasmpls = False
        self.routertype = 'frr'
        self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0,
                        'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0,
                        'ldpd': 0, 'eigrpd': 0, 'nhrpd': 0}
        self.daemons_options = {'zebra': ''}
        self.reportCores = True

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get('frrdir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/frr'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    def _config_quagga(self, **params):
        "Configure Quagga binaries"
        self.daemondir = params.get('quaggadir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/quagga'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get('daemondir')
        if self.daemondir is None:
            self.routertype = params.get('routertype', 'frr')
            if self.routertype == 'quagga':
                self._config_quagga(**params)
            else:
                self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, 'zebra')
            if not os.path.isfile(zpath):
                raise Exception('No zebra binary found in {}'.format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get('routertype') is not None:
                self.routertype = self.params.get('routertype')

        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)
        # Enable coredumps
        assert_sysctl(self, 'kernel.core_uses_pid', 1)
        assert_sysctl(self, 'fs.suid_dumpable', 1)
        # this applies to the kernel not the namespace...
        # original on ubuntu 17.x, but apport won't save as in namespace
        # |/usr/share/apport/apport %p %s %c %d %P
        corefile = '%e_core-sig_%s-pid_%p.dmp'
        assert_sysctl(self, 'kernel.core_pattern', corefile)
        self.cmd('ulimit -c unlimited')
        # Set ownership of config files
        self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype))

    def terminate(self):
        # Delete Running Quagga or FRR Daemons
        self.stopRouter()
        # rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        # for d in StringIO.StringIO(rundaemons):
        #     self.cmd('kill -7 `cat %s`' % d.rstrip())
        #     self.waitOutput()
        # Disable forwarding
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(Router, self).terminate()
        os.system('chmod -R go+rw /tmp/topotests')

    def stopRouter(self, wait=True):
        # Stop Running Quagga or FRR Daemons
        rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return
        if rundaemons is not None:
            numRunning = 0
            for d in StringIO.StringIO(rundaemons):
                daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
                if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
                    logger.info('{}: stopping {}'.format(
                        self.name,
                        os.path.basename(d.rstrip().rsplit(".", 1)[0])
                    ))
                    self.cmd('kill -TERM %s' % daemonpid)
                    self.waitOutput()
                    if pid_exists(int(daemonpid)):
                        numRunning += 1
            if wait and numRunning > 0:
                sleep(2, '{}: waiting for daemons to stop'.format(self.name))
                # 2nd round of kill if daemons didn't exit
                for d in StringIO.StringIO(rundaemons):
                    daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
                    if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
                        logger.info('{}: killing {}'.format(
                            self.name,
                            os.path.basename(d.rstrip().rsplit(".", 1)[0])
                        ))
                        self.cmd('kill -7 %s' % daemonpid)
                        self.waitOutput()
                    self.cmd('rm -- {}'.format(d.rstrip()))
        if wait:
            self.checkRouterCores(reportOnce=True)

    def removeIPs(self):
        for interface in self.intfNames():
            self.cmd('ip address flush', interface)

    def checkCapability(self, daemon, param):
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace('-', '')
            output = self.cmd('{0} -h | grep {1}'.format(
                daemon_path, daemon_search_option))
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys():
            self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            if source is None:
                self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon))
                self.waitOutput()
            else:
                self.cmd('cp %s /etc/%s/%s.conf' % (source, self.routertype, daemon))
                self.waitOutput()
            self.cmd('chmod 640 /etc/%s/%s.conf' % (self.routertype, daemon))
            self.waitOutput()
            self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon))
            self.waitOutput()
        else:
            logger.info('No daemon {} known'.format(daemon))
        # print "Daemons after:", self.daemons

    def startRouter(self, tgen=None):
        # Disable integrated-vtysh-config
        self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype)
        self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype))
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check that LDP is compiled and the Linux kernel is 4.5 or higher.
        # No error - but return message and skip all the tests
        if self.daemons['ldpd'] == 1:
            ldpd_path = os.path.join(self.daemondir, 'ldpd')
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), '4.5') < 0:
                logger.info("LDP Test needs Linux Kernel 4.5 minimum")
                return "LDP Test needs Linux Kernel 4.5 minimum"
            # Check if MPLS is available
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info("LDP/MPLS Tests will be skipped, platform missing module(s)")
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if os.system('/sbin/modprobe mpls-router') != 0:
                    logger.info('MPLS tests will not run (missing mpls-router kernel module)')
                elif os.system('/sbin/modprobe mpls-iptunnel') != 0:
                    logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)')
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"
            self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels')

        if self.daemons['eigrpd'] == 1:
            eigrpd_path = os.path.join(self.daemondir, 'eigrpd')
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        self.restartRouter()
        return ""

    def restartRouter(self):
        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd('cd {}/{}'.format(self.logdir, self.name))
        self.cmd('umask 000')
        # Re-enable to allow for report per run
        self.reportCores = True
        # Start Zebra first
        if self.daemons['zebra'] == 1:
            zebra_path = os.path.join(self.daemondir, 'zebra')
            zebra_option = self.daemons_options['zebra']
            self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format(
                zebra_path, zebra_option, self.logdir, self.name
            ))
            self.waitOutput()
            logger.debug('{}: {} zebra started'.format(self, self.routertype))
            sleep(1, '{}: waiting for zebra to start'.format(self.name))
        # Fix Link-Local Addresses
        # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
        self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
        # Now start all the other daemons
        for daemon in self.daemons:
            # Skip disabled daemons and zebra
            if self.daemons[daemon] == 0 or daemon == 'zebra':
                continue

            daemon_path = os.path.join(self.daemondir, daemon)
            self.cmd('{0} > {3}.out 2> {3}.err &'.format(
                daemon_path, self.logdir, self.name, daemon
            ))
            self.waitOutput()
            logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))

    def getStdErr(self, daemon):
        return self.getLog('err', daemon)

    def getStdOut(self, daemon):
        return self.getLog('out', daemon)

    def getLog(self, log, daemon):
        return self.cmd('cat {}/{}/{}.{}'.format(self.logdir, self.name, daemon, log))

    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                # Look for core file
                corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s" % backtrace)
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write("%s: %s has memory leaks:\n" % (self.name, daemon))
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n ## \1", log)
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    sys.stderr.write("%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon))
                    reportMade = True
        if reportMade:
            self.reportCores = False

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo if they don't run"

        global fatal_error

        daemonsRunning = self.cmd('vtysh -c "show log" | grep "Logging configuration for"')
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                # Look for core file
                corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s\n" % backtrace)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile('{}/{}/{}.log'.format(self.logdir, self.name, daemon)):
                        log_tail = subprocess.check_output([
                            "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                self.logdir, self.name, daemon)
                        ], shell=True)
                        sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon))
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (self.name, daemon)

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""

    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"

        linklocal = []

        ifaces = self.cmd('ip -6 address')
        # Fix newlines (make them all the same)
        ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            m = re.search('[0-9]+: ([^:@]+)[@if0-9:]+ <', line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link', line)
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                if (ll_per_if_count > 1):
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal

    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if (daemon == 'ldpd'):
            if version_cmp(platform.release(), '4.5') < 0:
                return False
            if self.cmd('/sbin/modprobe -n mpls-router') != "":
                return False
            if self.cmd('/sbin/modprobe -n mpls-iptunnel') != "":
                return False

        return True

    def get_routertype(self):
        "Return the type of Router (frr or quagga)"

        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info('\nRouter {} {} StdErr Log:\n{}'.format(
                        self.name, daemon, log))
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write("# Memory Leak Detection for topotest %s\n\n" % testscript)
                        leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n#### \1\n", log)
                    log = re.sub("memstats: ", " ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()


class LegacySwitch(OVSSwitch):
    "A Legacy Switch without OpenFlow"

    def __init__(self, name, **params):
        OVSSwitch.__init__(self, name, failMode='standalone', **params)
        self.switchIP = None