]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/bgp_evpn_mh/test_evpn_mh.py
Merge pull request #9364 from LabNConsulting/ziemba/vrf_name_to_id-unknown
[mirror_frr.git] / tests / topotests / bgp_evpn_mh / test_evpn_mh.py
1 #!/usr/bin/env python
2
3 #
4 # test_evpn_mh.py
5 #
6 # Copyright (c) 2020 by
7 # Cumulus Networks, Inc.
8 # Anuradha Karuppiah
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 """
26 test_evpn_mh.py: Testing EVPN multihoming
27
28 """
29
30 import os
31 import sys
32 import subprocess
33 from functools import partial
34
35 import pytest
36 import json
37 import platform
38 from functools import partial
39
40 pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
41
42 # Save the Current Working Directory to find configuration files.
43 CWD = os.path.dirname(os.path.realpath(__file__))
44 sys.path.append(os.path.join(CWD, "../"))
45
46 # pylint: disable=C0413
47 # Import topogen and topotest helpers
48 from lib import topotest
49
50 # Required to instantiate the topology builder class.
51 from lib.topogen import Topogen, TopoRouter, get_topogen
52
53 pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
54
55 #####################################################
56 ##
57 ## Network Topology Definition
58 ##
59 ## See topology picture at evpn-mh-topo-tests.pdf
60 #####################################################
61
62
def build_topo(tgen):
    """
    Build the EVPN Multihoming topology -
    1. Two level CLOS
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2

    See topology picture at evpn-mh-topo-tests.pdf
    """
    routers = [
        "spine1",
        "spine2",
        "torm11",
        "torm12",
        "torm21",
        "torm22",
        "hostd11",
        "hostd12",
        "hostd21",
        "hostd22",
    ]
    for rname in routers:
        tgen.add_router(rname)

    # Point-to-point links, one switch per link. The order below fixes
    # both the switch names (sw1..sw16) and the per-router interface
    # numbering (e.g. spine1-eth0 connects to torm11-eth0), so it must
    # not be changed.
    links = [
        # spine1 uplinks: spine1-eth0..eth3 <-> tormXY-eth0
        ("spine1", "torm11"),
        ("spine1", "torm12"),
        ("spine1", "torm21"),
        ("spine1", "torm22"),
        # spine2 uplinks: spine2-eth0..eth3 <-> tormXY-eth1
        ("spine2", "torm11"),
        ("spine2", "torm12"),
        ("spine2", "torm21"),
        ("spine2", "torm22"),
        # rack-1 host links: torm1Y-eth2/eth3 <-> hostd1Y-eth0/eth1
        ("torm11", "hostd11"),
        ("torm11", "hostd12"),
        ("torm12", "hostd11"),
        ("torm12", "hostd12"),
        # rack-2 host links: torm2Y-eth2/eth3 <-> hostd2Y-eth0/eth1
        ("torm21", "hostd21"),
        ("torm21", "hostd22"),
        ("torm22", "hostd21"),
        ("torm22", "hostd22"),
    ]
    for swnum, (r1, r2) in enumerate(links, start=1):
        switch = tgen.add_switch("sw{}".format(swnum))
        switch.add_link(tgen.gears[r1])
        switch.add_link(tgen.gears[r2])
171
172
173 #####################################################
174 ##
175 ## Tests starting
176 ##
177 #####################################################
178
# Address each TOR uses as its local VxLAN tunnel endpoint
# (passed to config_vxlan as the "local" address)
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# Per-TOR SVI (vlan1000) address, passed to config_svi
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

# Per-rack views of the TOR VTEP addresses, used by the ES checkers
tor_ips_rack_1 = {name: tor_ips[name] for name in ("torm11", "torm12")}

tor_ips_rack_2 = {name: tor_ips[name] for name in ("torm21", "torm22")}

# Expected ESI for the ethernet segment of each dual-attached host
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
203
204
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Set up an 802.3ad (LACP) bond on a TOR or host for multihoming.

    The bond gets fast LACP rate and the given actor system MAC, each
    member link is bounced into the bond, and - when *br* is truthy -
    the bond is enslaved to the bridge named "bridge" and moved from
    VLAN 1 to untagged/pvid VLAN 1000.
    """
    bond_cmds = (
        "ip link add dev %s type bond mode 802.3ad" % bond_name,
        "ip link set dev %s type bond lacp_rate 1" % bond_name,
        "ip link set dev %s type bond miimon 100" % bond_name,
        "ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name,
        "ip link set dev %s type bond min_links 1" % bond_name,
        "ip link set dev %s type bond ad_actor_system %s"
        % (bond_name, bond_ad_sys_mac),
    )
    for cmd in bond_cmds:
        node.run(cmd)

    # members must be down before they can be enslaved
    for member in bond_members:
        node.run("ip link set dev %s down" % member)
        node.run("ip link set dev %s master %s" % (member, bond_name))
        node.run("ip link set dev %s up" % member)

    node.run("ip link set dev %s up" % bond_name)

    # if bridge is specified add the bond as a bridge member
    if br:
        bridge_cmds = (
            " ip link set dev %s master bridge",
            "/sbin/bridge link set dev %s priority 8",
            "/sbin/bridge vlan del vid 1 dev %s",
            "/sbin/bridge vlan del vid 1 untagged pvid dev %s",
            "/sbin/bridge vlan add vid 1000 dev %s",
            "/sbin/bridge vlan add vid 1000 untagged pvid dev %s",
        )
        for cmd in bridge_cmds:
            node.run(cmd % bond_name)
233
234
def config_mcast_tunnel_termination_device(node):
    """
    Create the dummy device (ipmr-lo) that the kernel requires for
    terminating VxLAN multicast tunnels when EVPN-PIM is used for
    flooded traffic.
    """
    for cmd in (
        "ip link add dev ipmr-lo type dummy",
        "ip link set dev ipmr-lo mtu 16000",
        "ip link set dev ipmr-lo mode dormant",
        "ip link set dev ipmr-lo up",
    ):
        node.run(cmd)
244
245
def config_bridge(node):
    """
    Create the VLAN-aware bridge ("bridge") that carries the VxLAN
    device and the host bonds, with VLAN 1000 provisioned on it.
    """
    for cmd in (
        "ip link add dev bridge type bridge stp_state 0",
        "ip link set dev bridge type bridge vlan_filtering 1",
        "ip link set dev bridge mtu 9216",
        "ip link set dev bridge type bridge ageing_time 1800",
        "ip link set dev bridge type bridge mcast_snooping 0",
        "ip link set dev bridge type bridge vlan_stats_enabled 1",
        "ip link set dev bridge up",
        "/sbin/bridge vlan add vid 1000 dev bridge",
    ):
        node.run(cmd)
258
259
def config_vxlan(node, node_ip):
    """
    Create the VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000; *node_ip* is the local tunnel
    endpoint address.
    """
    vxlan_cmds = (
        "ip link add dev vx-1000 type vxlan id 1000 dstport 4789",
        "ip link set dev vx-1000 type vxlan nolearning",
        "ip link set dev vx-1000 type vxlan local %s" % node_ip,
        "ip link set dev vx-1000 type vxlan ttl 64",
        "ip link set dev vx-1000 mtu 9152",
        # multicast tunnels terminate on the ipmr-lo dummy device
        "ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100",
        "ip link set dev vx-1000 up",
        # bridge attrs
        "ip link set dev vx-1000 master bridge",
        "/sbin/bridge link set dev vx-1000 neigh_suppress on",
        "/sbin/bridge link set dev vx-1000 learning off",
        "/sbin/bridge link set dev vx-1000 priority 8",
        "/sbin/bridge vlan del vid 1 dev vx-1000",
        "/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000",
        "/sbin/bridge vlan add vid 1000 dev vx-1000",
        "/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000",
    )
    for cmd in vxlan_cmds:
        node.run(cmd)
282
283
def config_svi(node, svi_pip):
    """
    Create the SVI for VLAN 1000 (*svi_pip* is the per-TOR address)
    plus the macvlan device carrying the anycast gateway address
    45.0.0.1 with MAC 00:00:5e:00:01:01.
    """
    svi_cmds = (
        "ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q",
        "ip addr add %s/24 dev vlan1000" % svi_pip,
        "ip link set dev vlan1000 up",
        "/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1",
        "ip link add link vlan1000 name vlan1000-v0 type macvlan mode private",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0",
        # NOTE(review): the next command only reads the sysctl that the
        # following one sets; looks like leftover - preserved as-is
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0",
        "ip link set dev vlan1000-v0 address 00:00:5e:00:01:01",
        "ip link set dev vlan1000-v0 up",
        # metric 1024 is not working
        "ip addr add 45.0.0.1/24 dev vlan1000-v0",
    )
    for cmd in svi_cmds:
        node.run(cmd)
300
301
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Provision a TOR as VTEP / EVPN-PE: multicast tunnel termination
    device, VLAN-aware bridge, VxLAN device, the two host-facing
    bonds, and the SVI.
    """
    # create a device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

    # create a vlan aware bridge
    config_bridge(tor)

    # create vxlan device and add it to bridge
    config_vxlan(tor, tor_ip)

    # create hostbonds and add them to the bridge; rack-1 and rack-2
    # use distinct LACP system MACs
    sys_mac = "44:38:39:ff:ff:01" if "torm1" in tor_name else "44:38:39:ff:ff:02"
    # hostbond1 <- eth2, hostbond2 <- eth3
    for bond_id in (1, 2):
        member = "%s-eth%d" % (tor_name, bond_id + 1)
        config_bond(tor, "hostbond%d" % bond_id, [member], sys_mac, "bridge")

    # create SVI
    config_svi(tor, svi_pip)
328
329
def config_tors(tgen, tors):
    """Apply the full VTEP configuration to each TOR name in *tors*."""
    for name in tors:
        config_tor(name, tgen.gears[name], tor_ips.get(name), svi_ips.get(name))
334
335
def compute_host_ip_mac(host_name):
    """
    Derive the host's IP (with /24 prefix) and MAC from its name,
    e.g. "hostd11" -> ("45.0.0.11/24", "00:00:00:00:00:11").
    """
    suffix = host_name.partition("hostd")[2]
    return "45.0.0.%s/24" % suffix, "00:00:00:00:00:" + suffix
342
343
def config_host(host_name, host):
    """
    Create the dual-attached bond ("torbond") on a host node and
    assign it the host's derived IP and MAC.
    """
    bond_name = "torbond"
    members = ["%s-eth%d" % (host_name, i) for i in (0, 1)]
    config_bond(host, bond_name, members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
357
358
def config_hosts(tgen, hosts):
    """Configure the dual-attached bond on every host name in *hosts*."""
    for name in hosts:
        config_host(name, tgen.gears[name])
363
364
def setup_module(module):
    """Start the topology, provision the TORs/hosts and launch FRR."""
    tgen = Topogen(build_topo, module.__name__)
    tgen.start_topology()

    # EVPN-MH needs kernel support that only landed in 4.19
    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    config_tors(tgen, ["torm11", "torm12", "torm21", "torm22"])
    config_hosts(tgen, ["hostd11", "hostd12", "hostd21", "hostd22"])

    # load the zebra/pim/bgp configuration on every router
    for rname, router in tgen.routers().items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_PIM, os.path.join(CWD, "{}/pim.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/evpn.conf".format(rname))
        )
    tgen.start_router()
403 # tgen.mininet_cli()
404
405
def teardown_module(_mod):
    """Teardown the pytest environment: tear down the whole topology."""
    get_topogen().stop_topology()
412
413
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check that the ES peers on a local ES match the other TORs on the
    DUT's own rack, minus any VTEPs known to be down.

    Returns None on success, (esi, diff) on mismatch.
    """
    rack = tor_ips_rack_1 if "torm1" in dut_name else tor_ips_rack_2

    # peers are every TOR on the rack except the DUT itself,
    # minus the down VTEPs
    expected = {ip for name, ip in rack.items() if dut_name not in name}
    expected -= set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
437
438
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify the list of PEs associated with a remote ES: every TOR on
    the other rack, minus any VTEPs known to be down.

    Returns None on success, (esi, diff) on mismatch.
    """
    rack = tor_ips_rack_2 if "torm1" in dut_name else tor_ips_rack_1
    expected = set(rack.values()) - set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
462
463
def check_es(dut):
    """
    Verify the PE lists of all ESs on *dut*, local and remote, and
    that every expected ES is present.

    Returns None on success, an error tuple/set otherwise.
    """
    # "l2vp" is accepted by vtysh as an abbreviation of "l2vpn"
    bgp_es_json = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es json"))

    seen_esis = set()
    for es in bgp_es_json:
        esi = es["esi"]
        seen_esis.add(esi)
        vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

        if "local" in es["type"]:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # check if all ESs are present
    missing_or_extra = seen_esis.symmetric_difference(set(host_es_map.values()))
    return missing_or_extra if missing_or_extra else None
498
499
def check_one_es(dut, esi, down_vteps):
    """
    Verify the PE list for a single ES, local or remote, treating the
    VTEPs in *down_vteps* as expected-absent.

    Returns None on success, an error string/tuple otherwise.
    """
    es = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi))
    if not es:
        return "esi %s not found" % esi

    esi = es["esi"]
    vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

    if "local" in es["type"]:
        return check_local_es(esi, vtep_ips, dut.name, down_vteps)
    return check_remote_es(esi, vtep_ips, dut.name, down_vteps)
522
523
def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    checker = partial(check_es, tgen.gears[dut_name])
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
    assert result is None, '"{}" ES content incorrect'.format(dut_name)
544
545
def test_evpn_ead_update():
    """
    Flap a host link on the remote rack and check that the EAD
    updates are sent/processed for the corresponding ESI.
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # dut on rack1 and host link flap on rack2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]
    remote_tor_ip = tor_ips.get(remote_tor_name)

    host_name = "hostd21"
    host = tgen.gears[host_name]
    esi = host_es_map.get(host_name)

    # check if the VTEP list is right to start with
    down_vteps = []
    checker = partial(check_one_es, dut, esi, down_vteps)
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
    assert result is None, '"{}" ES content incorrect'.format(dut_name)

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    down_vteps.append(remote_tor_ip)
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
    assert result is None, '"{}" ES incorrect after remote link down'.format(dut_name)

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(remote_tor_ip)
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
    assert result is None, '"{}" ES incorrect after remote link flap'.format(dut_name)
591
592
def ping_anycast_gw(tgen):
    """
    ARP for the anycast gateway (45.0.0.1) from one host on each rack
    to populate the MAC addresses on the PEs.
    """
    interpreter = tgen.net.get_exec_path(["python3", "python"])
    sendpkt = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
    intf = "torbond"
    ipaddr = "45.0.0.1"
    arp_pkt = 'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr)
    cmd = [
        interpreter,
        sendpkt,
        "--imports=Ether,ARP",
        "--interface=" + intf,
        arp_pkt,
    ]

    for hname in ("hostd11", "hostd21"):
        host = tgen.net.hosts[hname]
        _, out, _ = host.cmd_status(cmd, warn=False, stderr=subprocess.STDOUT)
        out = out.strip()
        if out:
            # keep a trace of what scapy reported, for debugging
            host.logger.debug(
                "%s: arping on %s for %s returned: %s", hname, intf, ipaddr, out
            )
615
616
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    Check that *mac* is present on *dut* in *vni* with the expected
    type ("local"/"remote"), ESI and - for local MACs - access
    interface. When *ping_gw* is set, first ARP for the anycast GW to
    populate the MAC tables.

    Returns None on success, an error string otherwise.
    """
    if ping_gw:
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    # iterate with a distinct name: the original loop reused "mac" and
    # clobbered the parameter used in the error message below
    for mac_key, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        # intf is only reported for local MACs
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        # bugfix: the original compared intf with itself (always true);
        # compare the reported interface against the expected one
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
636
637
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """

    tgen = get_topogen()

    # consistency fix: skip early if an earlier test already broke a
    # router, like every other test in this file does
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]
    tors = []
    tors.append(tgen.gears["torm11"])
    tors.append(tgen.gears["torm12"])

    vni = 1000

    # check if the rack-1 host MAC is present on all rack-1 PEs
    # and points to local access port
    m_type = "local"
    _, mac = compute_host_ip_mac(local_host.name)
    esi = host_es_map.get(local_host.name)
    intf = "hostbond1"

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf, True, tgen)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" local MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg

    # check if the rack-2 host MAC is present on all rack-1 PEs
    # and points to the remote ES destination
    m_type = "remote"
    _, mac = compute_host_ip_mac(remote_host.name)
    esi = host_es_map.get(remote_host.name)
    intf = ""

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" remote MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg
681
682
def check_df_role(dut, esi, role):
    """
    Return an error string if the DF role ("DF"/"nonDF") on *dut* for
    *esi* differs from *role*; None when it matches.
    """
    es = json.loads(dut.vtysh_cmd("show evpn es %s json" % esi))
    if not es:
        return "esi %s not found" % esi

    curr_role = "nonDF" if "nonDF" in es.get("flags", []) else "DF"
    if curr_role == role:
        return None
    return "%s is %s for %s" % (dut.name, curr_role, esi)
700
701
def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the DF winner.
    """
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    intf = "hostbond1"
    tors = [tgen.gears["torm11"], tgen.gears["torm12"]]

    def expect_df(df_node):
        # every rack-1 PE must agree on who the DF is
        for tor in tors:
            role = "DF" if tor.name == df_node else "nonDF"
            checker = partial(check_df_role, tor, esi, role)
            _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
            assert result is None, '"{}" DF role incorrect'.format(tor.name)

    # check roles on rack-1
    expect_df("torm11")

    # change df preference on the nonDF to make it the df
    torm12 = tgen.gears["torm12"]
    torm12.vtysh_cmd("conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000))

    # re-check roles on rack-1; we should have a new winner
    expect_df("torm12")
745
746
def check_protodown_rc(dut, protodown_rc):
    """
    Check the protodown reason codes on *dut*: with a code given it
    must be present; with None no code may be set at all.

    Returns None on success, an error string otherwise.
    """
    evpn_js = json.loads(dut.vtysh_cmd("show evpn json"))
    tmp_rc = evpn_js.get("protodownReasons", [])

    if protodown_rc:
        if protodown_rc not in tmp_rc:
            return "protodown %s missing in %s" % (protodown_rc, tmp_rc)
    elif tmp_rc:
        return "unexpected protodown rc %s" % (tmp_rc)

    return None
765
766
def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """

    tgen = get_topogen()

    # consistency fix: skip early if an earlier test already broke a
    # router, like every other test in this file does
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    # wait for protodown rc to clear after startup
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # disable the uplinks
    dut.run("ip link set %s-eth0 down" % dut_name)
    dut.run("ip link set %s-eth1 down" % dut_name)

    # check if the access ports have been protodowned
    test_fn = partial(check_protodown_rc, dut, "uplinkDown")
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # enable the uplinks
    dut.run("ip link set %s-eth0 up" % dut_name)
    dut.run("ip link set %s-eth1 up" % dut_name)

    # check if the access ports have been moved out of protodown
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg
805
806
if __name__ == "__main__":
    # allow running this file directly; -s keeps router output visible
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))