# OF THIS SOFTWARE.
#
+import json
import os
import errno
import re
import glob
import StringIO
import subprocess
+import tempfile
import platform
+import difflib
+import time
+
+from lib.topolog import logger
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.link import Intf
-from time import sleep
class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        # Flat list of error message lines accumulated so far.
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.extend(error.splitlines())

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return bool(self.errors)
+
def get_test_logdir(node=None, init=False):
    """
    Return the current test log directory based on PYTEST_CURRENT_TEST
    environment variable.
    Optional parameters:
    node: when set, adds the node specific log directory to the init dir
    init: when set, initializes the log directory and fixes path permissions
    """
    cur_test = os.environ['PYTEST_CURRENT_TEST']

    # Strip everything from '.py' onwards and flatten the relative path
    # into a single directory name under /tmp/topotests/.
    base = cur_test[0:cur_test.find(".py")].replace('/', '.')
    ret = '/tmp/topotests/' + base
    if node is not None:
        node_dir = ret + "/" + node
        if init:
            os.system('mkdir -p ' + node_dir)
            os.system('chmod -R go+rw /tmp/topotests')
    return ret
+
def json_diff(d1, d2):
    """
    Returns a string with the difference between JSON data.
    """
    # Render both values with identical formatting so the diff only
    # shows real content differences.
    fmt = {'indent': 4, 'sort_keys': True}
    expected = json.dumps(d2, **fmt)
    current = json.dumps(d1, **fmt)
    return difflines(expected, current,
                     title1='Expected value', title2='Current value', n=0)
+
def json_cmp(d1, d2):
    """
    JSON compare function. Receives two parameters:
    * `d1`: json value
    * `d2`: json subset which we expect

    Returns `None` when all keys that `d1` has matches `d2`,
    otherwise a `json_cmp_result` object describing what failed.

    Note: key absence can be tested by adding a key with value `None`.
    """
    squeue = [(d1, d2, 'json')]
    result = json_cmp_result()
    for s in squeue:
        nd1, nd2, parent = s
        s1, s2 = set(nd1), set(nd2)

        # Expect all required fields to exist.
        s2_req = set([key for key in nd2 if nd2[key] is not None])
        diff = s2_req - s1
        if diff:
            result.add_error('expected key(s) {} in {} (have {}):\n{}'.format(
                str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2)))

        for key in s2.intersection(s1):
            # Test for non existence of key in d2
            if nd2[key] is None:
                result.add_error('"{}" should not exist in {} (have {}):\n{}'.format(
                    key, parent, str(s1), json_diff(nd1[key], nd2[key])))
                continue
            # If nd1 key is a dict, we have to recurse in it later.
            if isinstance(nd2[key], dict):
                if not isinstance(nd1[key], dict):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {}):\n{}'.format(
                            type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
                    continue
                nparent = '{}["{}"]'.format(parent, key)
                squeue.append((nd1[key], nd2[key], nparent))
                continue
            # Check list items
            if isinstance(nd2[key], list):
                if not isinstance(nd1[key], list):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {}):\n{}'.format(
                            type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
                    continue
                # Check list size
                if len(nd2[key]) > len(nd1[key]):
                    result.add_error(
                        '{}["{}"] too few items '.format(parent, key) +
                        '(have {}, expected {}:\n {})'.format(
                            len(nd1[key]), len(nd2[key]),
                            json_diff(nd1[key], nd2[key])))
                    continue

                # Collect every expected item that has no match in nd1.
                # BUG FIX: the original broke out of the expected-items loop
                # as soon as one item matched ('if matched: break'), so any
                # expected entries after the first match were never checked.
                unmatched = []
                for expected in nd2[key]:
                    matched = False
                    for value in nd1[key]:
                        if json_cmp({'json': value}, {'json': expected}) is None:
                            matched = True
                            break
                    if not matched:
                        unmatched.append(expected)

                # If there are unmatched items, error out.
                if unmatched:
                    result.add_error(
                        '{}["{}"] value is different (\n{})'.format(
                            parent, key, json_diff(nd1[key], nd2[key])))
                continue

            # Compare JSON values
            if nd1[key] != nd2[key]:
                result.add_error(
                    '{}["{}"] value is different (\n{})'.format(
                        parent, key, json_diff(nd1[key], nd2[key])))
                continue

    if result.has_errors():
        return result

    return None
+
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    # BUG FIX: initialize `result` so a call with count <= 0 returns
    # (False, None) instead of raising UnboundLocalError.
    result = None
    while count > 0:
        result = func()
        if result == what:
            return (True, result)
        # Not there yet: wait before retrying (matches original timing,
        # which also slept after the final failed attempt).
        time.sleep(wait)
        count -= 1
    return (False, result)
+
def int2dpid(dpid):
"Converting Integer to DPID"
else:
return True
def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    diff_lines = difflib.unified_diff(text1, text2,
                                      fromfile=title1, tofile=title2, **opts)
    # Join non-empty lines with the platform separator; an empty diff
    # (identical inputs) collapses to ''.
    return os.linesep.join(
        line for line in '\n'.join(diff_lines).splitlines() if line)
+
def difflines(text1, text2, title1='', title2='', **opts):
    "Wrapper for get_textdiff to avoid string transformations."
    def _as_lines(text):
        # Strip trailing whitespace, guarantee a final newline and split
        # keeping line endings (difflib expects lists of lines).
        return ('\n'.join(text.rstrip().splitlines()) + '\n').splitlines(True)
    return get_textdiff(_as_lines(text1), _as_lines(text2),
                        title1, title2, **opts)
+
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    # delete=False on purpose: the caller owns (and must remove) the file.
    tmpfile = tempfile.NamedTemporaryFile(mode='w', delete=False)
    with tmpfile:
        tmpfile.write(content)
    return tmpfile.name
+
def normalize_text(text):
    """
    Strips formatting spaces/tabs and carriage returns.
    """
    # Collapse runs of spaces/tabs to a single space, then drop CRs.
    return re.sub(r'\r', '', re.sub(r'[ \t]+', ' ', text))
+
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formatted.
    """
    vregex = r'(?P<whole>\d+(\.(\d+))*)'
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got a invalid version string")

    # Compare numerically component by component; missing trailing
    # components count as zero, so '1' == '1.0' and '1.0.1' > '1.0'.
    v1g = [int(part) for part in v1m.group('whole').split('.')]
    v2g = [int(part) for part in v2m.group('whole').split('.')]
    width = max(len(v1g), len(v2g))
    v1g += [0] * (width - len(v1g))
    v2g += [0] * (width - len(v2g))

    for n1, n2 in zip(v1g, v2g):
        if n1 > n2:
            return 1
        if n1 < n2:
            return -1
    return 0
+
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    # Each keyword of interest is followed by its value on the line.
    keywords = ('dev', 'via', 'proto', 'metric', 'scope')
    routes = {}
    for line in normalize_text(node.run('ip route')).splitlines():
        words = line.split(' ')
        # The first word is the destination prefix.
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            if previous in keywords:
                entry[previous] = word
            previous = word
    return routes
+
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    # Each keyword of interest is followed by its value on the line.
    keywords = ('dev', 'via', 'proto', 'metric', 'pref')
    routes = {}
    for line in normalize_text(node.run('ip -6 route')).splitlines():
        words = line.split(' ')
        # The first word is the destination prefix.
        entry = routes[words[0]] = {}
        previous = None
        for word in words:
            if previous in keywords:
                entry[previous] = word
            previous = word
    return routes
+
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        message = 'Sleeping for {} seconds'.format(amount)
    else:
        message = reason + ' ({} seconds)'.format(amount)
    logger.info(message)
    time.sleep(amount)
+
def checkAddressSanitizerError(output, router, component):
"Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
addrSanFile.write(' '+ '\n '.join(addressSantizerLog.group(1).splitlines()) + '\n')
addrSanFile.write("\n---------------\n")
return True
- return False
+ return False
def addRouter(topo, name):
"Adding a FRRouter (or Quagga) to Topology"
'/var/log']
return topo.addNode(name, cls=Router, privateDirs=MyPrivateDirs)
def set_sysctl(node, sysctl, value):
    "Set a sysctl value and return None on success or an error string"
    valuestr = '{}'.format(value)
    cmdret = node.cmd("sysctl {0}={1}".format(sysctl, valuestr))

    # sysctl echoes back 'name = value' on success; treat anything else
    # (no echo, or a mismatched name/value) as an error and return the
    # raw command output for diagnosis.
    matches = re.search(r'([^ ]+) = ([^\s]+)', cmdret)
    if matches is not None \
            and matches.group(1) == sysctl \
            and matches.group(2) == valuestr:
        return None
    return cmdret
+
def assert_sysctl(node, sysctl, value):
    "Set and assert that the sysctl is set with the specified value."
    error = set_sysctl(node, sysctl, value)
    assert error is None
+
class LinuxRouter(Node):
"A Node with IPv4/IPv6 forwarding enabled."
def config(self, **params):
super(LinuxRouter, self).config(**params)
# Enable forwarding on the router
- self.cmd('sysctl net.ipv4.ip_forward=1')
- self.cmd('sysctl net.ipv6.conf.all.forwarding=1')
+ assert_sysctl(self, 'net.ipv4.ip_forward', 1)
+ assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)
def terminate(self):
"""
Terminate generic LinuxRouter Mininet instance
"""
- self.cmd('sysctl net.ipv4.ip_forward=0')
- self.cmd('sysctl net.ipv6.conf.all.forwarding=0')
+ set_sysctl(self, 'net.ipv4.ip_forward', 0)
+ set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
super(LinuxRouter, self).terminate()
class Router(Node):
"A Node with IPv4/IPv6 forwarding enabled and Quagga as Routing Engine"
+ def __init__(self, name, **params):
+ super(Router, self).__init__(name, **params)
+ self.logdir = params.get('logdir', get_test_logdir(name, True))
+ self.daemondir = None
+ self.hasmpls = False
+ self.routertype = 'frr'
+ self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0,
+ 'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0,
+ 'ldpd': 0, 'eigrpd': 0, 'nhrpd': 0}
+ self.daemons_options = {'zebra': ''}
+ self.reportCores = True
+
+ def _config_frr(self, **params):
+ "Configure FRR binaries"
+ self.daemondir = params.get('frrdir')
+ if self.daemondir is None:
+ self.daemondir = '/usr/lib/frr'
+
+ zebra_path = os.path.join(self.daemondir, 'zebra')
+ if not os.path.isfile(zebra_path):
+ raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
+
+ def _config_quagga(self, **params):
+ "Configure Quagga binaries"
+ self.daemondir = params.get('quaggadir')
+ if self.daemondir is None:
+ self.daemondir = '/usr/lib/quagga'
+
+ zebra_path = os.path.join(self.daemondir, 'zebra')
+ if not os.path.isfile(zebra_path):
+ raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path))
+
+ # pylint: disable=W0221
+ # Some params are only meaningful for the parent class.
def config(self, **params):
super(Router, self).config(**params)
- # Check if Quagga or FRR is installed
- if os.path.isfile('/usr/lib/frr/zebra'):
- self.routertype = 'frr'
- elif os.path.isfile('/usr/lib/quagga/zebra'):
- self.routertype = 'quagga'
+ # User did not specify the daemons directory, try to autodetect it.
+ self.daemondir = params.get('daemondir')
+ if self.daemondir is None:
+ self.routertype = params.get('routertype', 'frr')
+ if self.routertype == 'quagga':
+ self._config_quagga(**params)
+ else:
+ self._config_frr(**params)
else:
- raise Exception('No FRR or Quagga found in ususal location')
+ # Test the provided path
+ zpath = os.path.join(self.daemondir, 'zebra')
+ if not os.path.isfile(zpath):
+ raise Exception('No zebra binary found in {}'.format(zpath))
+ # Allow user to specify routertype when the path was specified.
+ if params.get('routertype') is not None:
+ self.routertype = self.params.get('routertype')
+
# Enable forwarding on the router
- self.cmd('sysctl net.ipv4.ip_forward=1')
- self.cmd('sysctl net.ipv6.conf.all.forwarding=1')
+ assert_sysctl(self, 'net.ipv4.ip_forward', 1)
+ assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)
# Enable coredumps
- self.cmd('sysctl kernel.core_uses_pid=1')
- self.cmd('sysctl fs.suid_dumpable=2')
- self.cmd("sysctl kernel.core_pattern=/tmp/%s_%%e_core-sig_%%s-pid_%%p.dmp" % self.name)
+ assert_sysctl(self, 'kernel.core_uses_pid', 1)
+ assert_sysctl(self, 'fs.suid_dumpable', 1)
+ #this applies to the kernel not the namespace...
+ #original on ubuntu 17.x, but apport won't save as in namespace
+ # |/usr/share/apport/apport %p %s %c %d %P
+ corefile = '%e_core-sig_%s-pid_%p.dmp'
+ assert_sysctl(self, 'kernel.core_pattern', corefile)
self.cmd('ulimit -c unlimited')
# Set ownership of config files
- self.cmd('chown %s:%svty /etc/%s' % (self.routertype, self.routertype, self.routertype))
- self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0,
- 'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0,
- 'ldpd': 0}
+ self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype))
+
def terminate(self):
# Delete Running Quagga or FRR Daemons
self.stopRouter()
# self.cmd('kill -7 `cat %s`' % d.rstrip())
# self.waitOutput()
# Disable forwarding
- self.cmd('sysctl net.ipv4.ip_forward=0')
- self.cmd('sysctl net.ipv6.conf.all.forwarding=0')
+ set_sysctl(self, 'net.ipv4.ip_forward', 0)
+ set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
super(Router, self).terminate()
- def stopRouter(self):
+ os.system('chmod -R go+rw /tmp/topotests')
+
+ def stopRouter(self, wait=True, assertOnError=True):
# Stop Running Quagga or FRR Daemons
rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
+ errors = ""
+ if re.search(r"No such file or directory", rundaemons):
+ return errors
if rundaemons is not None:
+ numRunning = 0
for d in StringIO.StringIO(rundaemons):
daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
- if pid_exists(int(daemonpid)):
- self.cmd('kill -7 %s' % daemonpid)
+ if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
+ logger.info('{}: stopping {}'.format(
+ self.name,
+ os.path.basename(d.rstrip().rsplit(".", 1)[0])
+ ))
+ self.cmd('kill -TERM %s' % daemonpid)
self.waitOutput()
+ if pid_exists(int(daemonpid)):
+ numRunning += 1
+ if wait and numRunning > 0:
+ sleep(2, '{}: waiting for daemons stopping'.format(self.name))
+ # 2nd round of kill if daemons didn't exit
+ for d in StringIO.StringIO(rundaemons):
+ daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
+ if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
+ logger.info('{}: killing {}'.format(
+ self.name,
+ os.path.basename(d.rstrip().rsplit(".", 1)[0])
+ ))
+ self.cmd('kill -7 %s' % daemonpid)
+ self.waitOutput()
+ self.cmd('rm -- {}'.format(d.rstrip()))
+ if wait:
+ errors = self.checkRouterCores(reportOnce=True)
+ if assertOnError and len(errors) > 0:
+ assert "Errors found - details follow:" == 0, errors
+ return errors
+
    def removeIPs(self):
        "Flush all IP addresses from every interface of this node."
        for interface in self.intfNames():
            self.cmd('ip address flush', interface)
- def loadConf(self, daemon, source=None):
+
+ def checkCapability(self, daemon, param):
+ if param is not None:
+ daemon_path = os.path.join(self.daemondir, daemon)
+ daemon_search_option = param.replace('-','')
+ output = self.cmd('{0} -h | grep {1}'.format(
+ daemon_path, daemon_search_option))
+ if daemon_search_option not in output:
+ return False
+ return True
+
+ def loadConf(self, daemon, source=None, param=None):
# print "Daemons before:", self.daemons
if daemon in self.daemons.keys():
self.daemons[daemon] = 1
+ if param is not None:
+ self.daemons_options[daemon] = param
if source is None:
self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon))
self.waitOutput()
self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon))
self.waitOutput()
else:
- print("No daemon %s known" % daemon)
+ logger.info('No daemon {} known'.format(daemon))
# print "Daemons after:", self.daemons
- def startRouter(self):
+
+ def startRouter(self, tgen=None):
# Disable integrated-vtysh-config
self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype)
self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype))
+ # TODO remove the following lines after all tests are migrated to Topogen.
# Try to find relevant old logfiles in /tmp and delete them
- map(os.remove, glob.glob("/tmp/*%s*.log" % self.name))
+ map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name)))
# Remove old core files
- map(os.remove, glob.glob("/tmp/%s*.dmp" % self.name))
+ map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name)))
# Remove IP addresses from OS first - we have them in zebra.conf
self.removeIPs()
# If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
# No error - but return message and skip all the tests
if self.daemons['ldpd'] == 1:
- if not os.path.isfile('/usr/lib/%s/ldpd' % self.routertype):
- print("LDP Test, but no ldpd compiled or installed")
+ ldpd_path = os.path.join(self.daemondir, 'ldpd')
+ if not os.path.isfile(ldpd_path):
+ logger.info("LDP Test, but no ldpd compiled or installed")
return "LDP Test, but no ldpd compiled or installed"
- kernel_version = re.search(r'([0-9]+)\.([0-9]+).*', platform.release())
-
- if kernel_version:
- if (float(kernel_version.group(1)) < 4 or
- (float(kernel_version.group(1)) == 4 and float(kernel_version.group(2)) < 5)):
- print("LDP Test need Linux Kernel 4.5 minimum")
- return "LDP Test need Linux Kernel 4.5 minimum"
- # Add mpls modules to kernel if we use LDP
- if self.daemons['ldpd'] == 1:
- self.cmd('/sbin/modprobe mpls-router')
- self.cmd('/sbin/modprobe mpls-iptunnel')
+
+ if version_cmp(platform.release(), '4.5') < 0:
+ logger.info("LDP Test need Linux Kernel 4.5 minimum")
+ return "LDP Test need Linux Kernel 4.5 minimum"
+ # Check if have mpls
+ if tgen != None:
+ self.hasmpls = tgen.hasmpls
+ if self.hasmpls != True:
+ logger.info("LDP/MPLS Tests will be skipped, platform missing module(s)")
+ else:
+ # Test for MPLS Kernel modules available
+ self.hasmpls = False
+ if os.system('/sbin/modprobe mpls-router') != 0:
+ logger.info('MPLS tests will not run (missing mpls-router kernel module)')
+ elif os.system('/sbin/modprobe mpls-iptunnel') != 0:
+ logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)')
+ else:
+ self.hasmpls = True
+ if self.hasmpls != True:
+ return "LDP/MPLS Tests need mpls kernel modules"
self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels')
- # Init done - now restarting daemons
+
+ if self.daemons['eigrpd'] == 1:
+ eigrpd_path = os.path.join(self.daemondir, 'eigrpd')
+ if not os.path.isfile(eigrpd_path):
+ logger.info("EIGRP Test, but no eigrpd compiled or installed")
+ return "EIGRP Test, but no eigrpd compiled or installed"
+
self.restartRouter()
return ""
+
def restartRouter(self):
- # Starts actuall daemons without init (ie restart)
+ # Starts actual daemons without init (ie restart)
+ # cd to per node directory
+ self.cmd('cd {}/{}'.format(self.logdir, self.name))
+ self.cmd('umask 000')
+ #Re-enable to allow for report per run
+ self.reportCores = True
# Start Zebra first
if self.daemons['zebra'] == 1:
-# self.cmd('/usr/lib/%s/zebra -d' % self.routertype)
- self.cmd('/usr/lib/%s/zebra > /tmp/%s-zebra.out 2> /tmp/%s-zebra.err &' % (self.routertype, self.name, self.name))
+ zebra_path = os.path.join(self.daemondir, 'zebra')
+ zebra_option = self.daemons_options['zebra']
+ self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format(
+ zebra_path, zebra_option, self.logdir, self.name
+ ))
self.waitOutput()
- print('%s: %s zebra started' % (self, self.routertype))
- sleep(1)
+ logger.debug('{}: {} zebra started'.format(self, self.routertype))
+ sleep(1, '{}: waiting for zebra to start'.format(self.name))
# Fix Link-Local Addresses
# Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
# Now start all the other daemons
for daemon in self.daemons:
- if (self.daemons[daemon] == 1) and (daemon != 'zebra'):
-# self.cmd('/usr/lib/%s/%s -d' % (self.routertype, daemon))
- self.cmd('/usr/lib/%s/%s > /tmp/%s-%s.out 2> /tmp/%s-%s.err &' % (self.routertype, daemon, self.name, daemon, self.name, daemon))
- self.waitOutput()
- print('%s: %s %s started' % (self, self.routertype, daemon))
+ # Skip disabled daemons and zebra
+ if self.daemons[daemon] == 0 or daemon == 'zebra':
+ continue
+
+ daemon_path = os.path.join(self.daemondir, daemon)
+ self.cmd('{0} > {3}.out 2> {3}.err &'.format(
+ daemon_path, self.logdir, self.name, daemon
+ ))
+ self.waitOutput()
+ logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))
    def getStdErr(self, daemon):
        # Convenience wrapper: captured stderr of `daemon`.
        return self.getLog('err', daemon)
    def getStdOut(self, daemon):
        # Convenience wrapper: captured stdout of `daemon`.
        return self.getLog('out', daemon)
def getLog(self, log, daemon):
- return self.cmd('cat /tmp/%s-%s.%s' % (self.name, daemon, log) )
+ return self.cmd('cat {}/{}/{}.{}'.format(self.logdir, self.name, daemon, log))
+
    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        """
        Inspect every enabled daemon of this router for crash evidence.

        For each enabled daemon, checks in order:
        * core dump files in the per-node log directory (a gdb backtrace
          is extracted and written to stderr),
        * "memstats" memory-leak reports in the daemon's stderr log
          (only when `reportLeaks` is True and no core was found),
        * AddressSanitizer errors in the daemon's stderr log.

        When `reportOnce` is True and a report was already made, returns
        early. NOTE(review): that early return yields None while the
        normal path returns a string ("" when clean) — callers should
        treat both as "nothing new to report"; confirm before relying on
        the return type.
        """
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                # Look for core file
                corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    # Extract a backtrace from the first core found.
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s" % backtrace)
                    traces = traces + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" % (self.name, daemon, backtrace)
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        # Daemon dumped memory statistics on exit: leaks.
                        sys.stderr.write("%s: %s has memory leaks:\n" % (self.name, daemon))
                        traces = traces + "\n%s: %s has memory leaks:\n" % (self.name, daemon)
                        # Reformat the memstats output for readability.
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n ## \1", log)
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    sys.stderr.write("%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon))
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    reportMade = True
        if reportMade:
            # Suppress duplicate reports when reportOnce is requested.
            self.reportCores = False
        return traces
+
def checkRouterRunning(self):
"Check if router daemons are running and collect crashinfo they don't run"
if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
# Look for core file
- corefiles = glob.glob("/tmp/%s_%s_core*.dmp" % (self.name, daemon))
+ corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
+ self.logdir, self.name, daemon))
if (len(corefiles) > 0):
- backtrace = subprocess.check_output(["gdb /usr/lib/%s/%s %s --batch -ex bt 2> /dev/null" % (self.routertype, daemon, corefiles[0])], shell=True)
+ daemon_path = os.path.join(self.daemondir, daemon)
+ backtrace = subprocess.check_output([
+ "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
+ ], shell=True)
sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
sys.stderr.write("%s\n" % backtrace)
else:
# No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
- if os.path.isfile("/tmp/%s-%s.log" % (self.name, daemon)):
- log_tail = subprocess.check_output(["tail -n20 /tmp/%s-%s.log 2> /dev/null" % (self.name, daemon)], shell=True)
+ if os.path.isfile('{}/{}/{}.log'.format(self.logdir, self.name, daemon)):
+ log_tail = subprocess.check_output([
+ "tail -n20 {}/{}/{}.log 2> /dev/null".format(
+ self.logdir, self.name, daemon)
+ ], shell=True)
sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon))
sys.stderr.write("%s\n" % log_tail)
def daemon_available(self, daemon):
"Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
- if not os.path.isfile('/usr/lib/%s/%s' % (self.routertype, daemon)):
+ daemon_path = os.path.join(self.daemondir, daemon)
+ if not os.path.isfile(daemon_path):
return False
if (daemon == 'ldpd'):
- kernel_version = re.search(r'([0-9]+)\.([0-9]+).*', platform.release())
- if kernel_version:
- if (float(kernel_version.group(1)) < 4 or
- (float(kernel_version.group(1)) == 4 and float(kernel_version.group(2)) < 5)):
- return False
- else:
+ if version_cmp(platform.release(), '4.5') < 0:
+ return False
+ if self.cmd('/sbin/modprobe -n mpls-router' ) != "":
+ return False
+ if self.cmd('/sbin/modprobe -n mpls-iptunnel') != "":
return False
+
return True
def get_routertype(self):
"Return the type of Router (frr or quagga)"
log = self.getStdErr(daemon)
if "memstats" in log:
# Found memory leak
- print("\nRouter %s %s StdErr Log:\n%s" % (self.name, daemon, log))
+ logger.info('\nRouter {} {} StdErr Log:\n{}'.format(
+ self.name, daemon, log))
if not leakfound:
leakfound = True
# Check if file already exists
def __init__(self, name, **params):
OVSSwitch.__init__(self, name, failMode='standalone', **params)
self.switchIP = None
-