# tests/topotests/bgp_evpn_mh/test_evpn_mh.py (from the FRR repository,
# mirrored at git.proxmox.com; merge "rib_match_multicast" era snapshot)
1 #!/usr/bin/env python
2 # SPDX-License-Identifier: ISC
3
4 #
5 # test_evpn_mh.py
6 #
7 # Copyright (c) 2020 by
8 # Cumulus Networks, Inc.
9 # Anuradha Karuppiah
10 #
11
12 """
13 test_evpn_mh.py: Testing EVPN multihoming
14
15 """
16
17 import os
18 import sys
19 import subprocess
20 from functools import partial
21
22 import pytest
23 import json
24 import platform
25 from functools import partial
26
27 pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
28
29 # Save the Current Working Directory to find configuration files.
30 CWD = os.path.dirname(os.path.realpath(__file__))
31 sys.path.append(os.path.join(CWD, "../"))
32
33 # pylint: disable=C0413
34 # Import topogen and topotest helpers
35 from lib import topotest
36
37 # Required to instantiate the topology builder class.
38 from lib.topogen import Topogen, TopoRouter, get_topogen
39
40 pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
41
42 #####################################################
43 ##
44 ## Network Topology Definition
45 ##
46 ## See topology picture at evpn-mh-topo-tests.pdf
47 #####################################################
48
49
def build_topo(tgen):
    """
    EVPN Multihoming Topology -
    1. Two level CLOS
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2
    """
    for rname in (
        "spine1",
        "spine2",
        "torm11",
        "torm12",
        "torm21",
        "torm22",
        "hostd11",
        "hostd12",
        "hostd21",
        "hostd22",
    ):
        tgen.add_router(rname)

    # Each point-to-point link is realized as a dedicated switch with two
    # members.  The order below fixes both the switch names (sw1..sw16) and
    # the ethN numbering on each node: a node's first appearance gets eth0,
    # its second eth1, and so on.
    p2p_links = (
        # spine1 fan-out: spine1-eth0..eth3 <-> tormXY-eth0
        ("spine1", "torm11"),
        ("spine1", "torm12"),
        ("spine1", "torm21"),
        ("spine1", "torm22"),
        # spine2 fan-out: spine2-eth0..eth3 <-> tormXY-eth1
        ("spine2", "torm11"),
        ("spine2", "torm12"),
        ("spine2", "torm21"),
        ("spine2", "torm22"),
        # rack-1 dual-attached hosts: torm1X-eth2/eth3 <-> hostd1Y-eth0/eth1
        ("torm11", "hostd11"),
        ("torm11", "hostd12"),
        ("torm12", "hostd11"),
        ("torm12", "hostd12"),
        # rack-2 dual-attached hosts: torm2X-eth2/eth3 <-> hostd2Y-eth0/eth1
        ("torm21", "hostd21"),
        ("torm21", "hostd22"),
        ("torm22", "hostd21"),
        ("torm22", "hostd22"),
    )
    for idx, (end1, end2) in enumerate(p2p_links, start=1):
        switch = tgen.add_switch("sw%d" % idx)
        switch.add_link(tgen.gears[end1])
        switch.add_link(tgen.gears[end2])
158
159
160 #####################################################
161 ##
162 ## Tests starting
163 ##
164 #####################################################
165
# Loopback/VTEP address of each TOR; used as the VxLAN tunnel source and as
# the expected peer/remote-PE address in the ES checks below.
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# Per-TOR unique SVI address on the 45.0.0.0/24 host subnet (the shared
# anycast GW 45.0.0.1 is configured separately on the vlan1000-v0 macvlan).
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

# Rack membership, keyed by TOR name; used to derive the expected ES peer
# (same rack) and remote-PE (other rack) VTEP lists.
tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"}

tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"}

# Static type-3 ESI assigned to each dual-attached host bond (derived from
# the per-rack LACP system MAC configured in config_tor).
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
190
191
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Used to setup bonds on the TORs and hosts for MH

    Creates an 802.3ad (LACP) bond, enslaves bond_members to it and, when
    br is set, attaches the bond to the VLAN-aware bridge on VLAN 1000.
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    # LACP/bond tuning applied one attribute at a time
    for bond_attr in (
        "lacp_rate 1",
        "miimon 100",
        "xmit_hash_policy layer3+4",
        "min_links 1",
        "ad_actor_system %s" % bond_ad_sys_mac,
    ):
        node.run("ip link set dev %s type bond %s" % (bond_name, bond_attr))

    # members must be down before they can be enslaved
    for member in bond_members:
        node.run("ip link set dev %s down" % member)
        node.run("ip link set dev %s master %s" % (member, bond_name))
        node.run("ip link set dev %s up" % member)

    node.run("ip link set dev %s up" % bond_name)

    # if bridge is specified add the bond as a bridge member on VLAN 1000
    if br:
        node.run(" ip link set dev %s master bridge" % bond_name)
        for br_cmd in (
            "/sbin/bridge link set dev %s priority 8",
            "/sbin/bridge vlan del vid 1 dev %s",
            "/sbin/bridge vlan del vid 1 untagged pvid dev %s",
            "/sbin/bridge vlan add vid 1000 dev %s",
            "/sbin/bridge vlan add vid 1000 untagged pvid dev %s",
        ):
            node.run(br_cmd % bond_name)
220
221
def config_mcast_tunnel_termination_device(node):
    """
    The kernel requires a device to terminate VxLAN multicast tunnels
    when EVPN-PIM is used for flooded traffic
    """
    # dummy device "ipmr-lo"; dormant mode keeps it out of normal
    # link-state handling while still usable for tunnel termination
    for cmd in (
        "ip link add dev ipmr-lo type dummy",
        "ip link set dev ipmr-lo mtu 16000",
        "ip link set dev ipmr-lo mode dormant",
        "ip link set dev ipmr-lo up",
    ):
        node.run(cmd)
231
232
def config_bridge(node):
    """
    Create a VLAN aware bridge
    """
    for cmd in (
        "ip link add dev bridge type bridge stp_state 0",
        "ip link set dev bridge type bridge vlan_filtering 1",
        "ip link set dev bridge mtu 9216",
        "ip link set dev bridge type bridge ageing_time 1800",
        # EVPN owns BUM replication; kernel snooping stays off
        "ip link set dev bridge type bridge mcast_snooping 0",
        "ip link set dev bridge type bridge vlan_stats_enabled 1",
        "ip link set dev bridge up",
        # VLAN 1000 is the (single) VNI-mapped VLAN used by all members
        "/sbin/bridge vlan add vid 1000 dev bridge",
    ):
        node.run(cmd)
245
246
def config_vxlan(node, node_ip):
    """
    Create a VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000.
    """
    vxlan_cmds = (
        "ip link add dev vx-1000 type vxlan id 1000 dstport 4789",
        # no dataplane learning; EVPN populates the MAC table
        "ip link set dev vx-1000 type vxlan nolearning",
        # tunnel source is this TOR's VTEP address
        "ip link set dev vx-1000 type vxlan local %s" % node_ip,
        "ip link set dev vx-1000 type vxlan ttl 64",
        "ip link set dev vx-1000 mtu 9152",
        # BUM traffic floods via mcast group 239.1.1.100, terminated on
        # the ipmr-lo dummy device
        "ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100",
        "ip link set dev vx-1000 up",
        # bridge membership and per-port attrs
        "ip link set dev vx-1000 master bridge",
        "/sbin/bridge link set dev vx-1000 neigh_suppress on",
        "/sbin/bridge link set dev vx-1000 learning off",
        "/sbin/bridge link set dev vx-1000 priority 8",
        # move the port off the default VLAN 1 onto VLAN 1000
        "/sbin/bridge vlan del vid 1 dev vx-1000",
        "/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000",
        "/sbin/bridge vlan add vid 1000 dev vx-1000",
        "/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000",
    )
    for cmd in vxlan_cmds:
        node.run(cmd)
269
270
def config_svi(node, svi_pip):
    """
    Create an SVI for VLAN 1000

    svi_pip is this TOR's unique SVI address; the shared anycast GW
    (45.0.0.1, MAC 00:00:5e:00:01:01) is placed on a macvlan on top.
    """
    for cmd in (
        "ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q",
        "ip addr add %s/24 dev vlan1000" % svi_pip,
        "ip link set dev vlan1000 up",
        "/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1",
        "ip link add link vlan1000 name vlan1000-v0 type macvlan mode private",
        # disable IPv6 DAD on the anycast macvlan - the same address/MAC
        # exists on every TOR
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0",
        # NOTE(review): this first dad_transmits call only reads the value
        # (no "=") - presumably a leftover; the next line does the set
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0",
        "ip link set dev vlan1000-v0 address 00:00:5e:00:01:01",
        "ip link set dev vlan1000-v0 up",
        # metric 1024 is not working
        "ip addr add 45.0.0.1/24 dev vlan1000-v0",
    ):
        node.run(cmd)
287
288
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Create the bond/vxlan-bridge on the TOR which acts as VTEP and EPN-PE
    """
    # mcast tunnel termination device, VLAN-aware bridge and VxLAN device
    config_mcast_tunnel_termination_device(tor)
    config_bridge(tor)
    config_vxlan(tor, tor_ip)

    # rack-1 and rack-2 TORs use distinct LACP system MACs; the system MAC
    # is what makes both TORs of a rack present one ethernet segment
    sys_mac = "44:38:39:ff:ff:01" if "torm1" in tor_name else "44:38:39:ff:ff:02"

    # one host bond per access port, each enslaved to the bridge
    for bond_name, member_suffix in (("hostbond1", "-eth2"), ("hostbond2", "-eth3")):
        config_bond(tor, bond_name, [tor_name + member_suffix], sys_mac, "bridge")

    # create SVI
    config_svi(tor, svi_pip)
315
316
def config_tors(tgen, tors):
    # Apply the full VTEP/PE configuration to every named TOR.
    for name in tors:
        config_tor(name, tgen.gears[name], tor_ips.get(name), svi_ips.get(name))
321
322
def compute_host_ip_mac(host_name):
    """
    Derive the host bond's IP (with /24 mask) and MAC from the numeric
    suffix of the host name, e.g. "hostd11" -> ("45.0.0.11/24",
    "00:00:00:00:00:11").
    """
    _, _, host_id = host_name.partition("hostd")
    return "45.0.0.%s/24" % host_id, "00:00:00:00:00:" + host_id
329
330
def config_host(host_name, host):
    """
    Create the dual-attached bond on host nodes for MH
    """
    bond_name = "torbond"
    # both uplinks (one per TOR) go into the same LACP bond
    members = [host_name + suffix for suffix in ("-eth0", "-eth1")]
    config_bond(host, bond_name, members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
344
345
def config_hosts(tgen, hosts):
    # Set up the dual-attached bond on every named host.
    for name in hosts:
        config_host(name, tgen.gears[name])
350
351
def setup_module(module):
    "Setup topology"
    tgen = Topogen(build_topo, module.__name__)
    tgen.start_topology()

    # bond/vxlan/bridge features used here need a reasonably new kernel
    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    # kernel-side dataplane config on the TORs and hosts
    config_tors(tgen, ["torm11", "torm12", "torm21", "torm22"])
    config_hosts(tgen, ["hostd11", "hostd12", "hostd21", "hostd22"])

    # load the per-router daemon configs and start FRR everywhere
    daemon_confs = (
        (TopoRouter.RD_ZEBRA, "zebra.conf"),
        (TopoRouter.RD_PIM, "pim.conf"),
        (TopoRouter.RD_BGP, "evpn.conf"),
    )
    for rname, router in tgen.routers().items():
        for daemon, conf_file in daemon_confs:
            router.load_config(daemon, os.path.join(CWD, rname, conf_file))
    tgen.start_router()
391
392
def teardown_module(_mod):
    "Teardown the pytest environment"
    # Dismantle the whole emulated topology and stop all daemons.
    get_topogen().stop_topology()
399
400
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check if ES peers are setup correctly on local ESs

    Returns None when vtep_ips matches the expected peer set, otherwise
    (esi, differing-IPs) for run_and_expect style retries.
    """
    # the expected peers are the other TORs in the DUT's own rack
    rack = tor_ips_rack_1 if "torm1" in dut_name else tor_ips_rack_2
    expected_peers = {ip for name, ip in rack.items() if dut_name not in name}

    # VTEPs known to be down are not expected to show up as peers
    expected_peers -= set(down_vteps)

    diff = expected_peers.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
424
425
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify list of PEs associated with a remote ES

    Returns None when vtep_ips matches the expected remote-PE set,
    otherwise (esi, differing-IPs).
    """
    # for a remote ES the PEs are the TORs of the *other* rack
    rack = tor_ips_rack_2 if "torm1" in dut_name else tor_ips_rack_1

    # VTEPs known to be down are not expected in the PE list
    expected = set(rack.values()) - set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
449
450
def check_es(dut):
    """
    Verify list of PEs associated all ESs, local and remote

    Returns None on success or a diagnostic value for run_and_expect.
    """
    # "l2vp" relies on vtysh prefix matching of "l2vpn" - presumably
    # intentional; kept as-is
    bgp_es_json = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es json"))

    expected_es_set = set(host_es_map.values())
    seen_esis = []

    # verify each advertised ES has the right VTEP list
    for es in bgp_es_json:
        esi = es["esi"]
        seen_esis.append(esi)
        vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

        checker = check_local_es if "local" in es["type"] else check_remote_es
        result = checker(esi, vtep_ips, dut.name, [])
        if result:
            return result

    # finally, make sure no ES is missing or unexpected
    missing = set(seen_esis).symmetric_difference(expected_es_set)
    return missing if missing else None
485
486
def check_one_es(dut, esi, down_vteps):
    """
    Verify list of PEs associated all ESs, local and remote

    Checks a single ES identified by esi; down_vteps lists VTEP IPs that
    are expected to be absent.  Returns None on success.
    """
    # "l2vp" relies on vtysh prefix matching of "l2vpn"
    es = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi))
    if not es:
        return "esi %s not found" % esi

    vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

    if "local" in es["type"]:
        return check_local_es(es["esi"], vtep_ips, dut.name, down_vteps)
    return check_remote_es(es["esi"], vtep_ips, dut.name, down_vteps)
509
510
def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    # retry until the full ES view converges on the DUT
    checker = partial(check_es, tgen.gears[dut_name])
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)

    assert result is None, '"{}" ES content incorrect'.format(dut_name)
531
532
def test_evpn_ead_update():
    """
    Flap a host link one the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # DUT sits on rack-1; the link flap happens on rack-2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]

    host_name = "hostd21"
    host = tgen.gears[host_name]  # fail fast if the host gear is missing
    esi = host_es_map.get(host_name)

    # test_fn captures down_vteps by reference - mutating the list below
    # changes what the retry helper expects on each call
    down_vteps = []
    test_fn = partial(check_one_es, dut, esi, down_vteps)

    def wait_converged(assertmsg):
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assert result is None, assertmsg

    # sanity: the VTEP list must be correct before the flap
    wait_converged('"{}" ES content incorrect'.format(dut_name))

    # down a remote host link and check if the EAD withdraw is rxed.
    # Note: LACP is not working as expected so the link is temporarily
    # shut down on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    down_vteps.append(tor_ips.get(remote_tor_name))
    wait_converged('"{}" ES incorrect after remote link down'.format(dut_name))

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(tor_ips.get(remote_tor_name))
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    wait_converged('"{}" ES incorrect after remote link flap'.format(dut_name))
578
579
def ping_anycast_gw(tgen):
    """
    Broadcast an ARP request for the anycast GW from one host on each
    rack so the PEs learn (and sync) the host MAC addresses.
    """
    interp = tgen.net.get_exec_path(["python3", "python"])
    script = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
    intf = "torbond"
    ipaddr = "45.0.0.1"
    arp_pkt = 'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr)
    ping_cmd = [interp, script, "--imports=Ether,ARP", "--interface=" + intf, arp_pkt]

    for name in ("hostd11", "hostd21"):
        host = tgen.net.hosts[name]
        # best-effort: failures only get logged, not raised
        _, stdout, _ = host.cmd_status(ping_cmd, warn=False, stderr=subprocess.STDOUT)
        stdout = stdout.strip()
        if stdout:
            host.logger.debug(
                "%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout
            )
602
603
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    checks if mac is present and if destination matches the one provided

    Looks up *mac* in *vni* on *dut* and verifies its type ("local" or
    "remote"), its ESI and - for local MACs - its access interface.
    When ping_gw is set, the anycast GW is ARPed from the hosts first to
    populate the MAC tables (tgen must then be supplied).
    Returns None on a match, else an error string (run_and_expect style).
    """

    if ping_gw:
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    # loop variable renamed from "mac" - the original shadowed the
    # parameter, which also corrupted the error message below
    for mac_key, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        # the access interface is only meaningful for local MACs;
        # remote MACs resolve via the ES instead
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        # Bug fix: the original compared "intf == intf" (always True),
        # so a wrong access interface was never detected
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
623
624
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """
    tgen = get_topogen()

    tors = [tgen.gears["torm11"], tgen.gears["torm12"]]
    vni = 1000

    # (host, expected MAC type, expected access intf, ping GW first, label)
    cases = (
        # rack-1 host: present on all rack-1 PEs, pointing at the local
        # access port; the GW ping populates the MAC tables first
        ("hostd11", "local", "hostbond1", True, "local"),
        # rack-2 host: present on rack-1 PEs as a remote MAC resolving
        # via the remote ES destination
        ("hostd21", "remote", "", False, "remote"),
    )

    for host_name, m_type, intf, ping_gw, label in cases:
        _, mac = compute_host_ip_mac(host_name)
        esi = host_es_map.get(host_name)

        for tor in tors:
            test_fn = partial(
                check_mac, tor, vni, mac, m_type, esi, intf, ping_gw, tgen
            )
            _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
            assertmsg = '"{}" {} MAC content incorrect'.format(tor.name, label)
            assert result is None, assertmsg
668
669
def check_df_role(dut, esi, role):
    """
    Return error string if the df role on the dut is different

    role is "DF" or "nonDF"; returns None when the DUT's current role
    for esi matches.
    """
    es = json.loads(dut.vtysh_cmd("show evpn es %s json" % esi))
    if not es:
        return "esi %s not found" % esi

    # absence of the nonDF flag means this PE won the DF election
    actual = "nonDF" if "nonDF" in es.get("flags", []) else "DF"
    if actual == role:
        return None
    return "%s is %s for %s" % (dut.name, actual, esi)
687
688
def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the DF winner.
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    intf = "hostbond1"

    tors = [tgen.gears["torm11"], tgen.gears["torm12"]]

    def expect_df(df_node):
        # exactly one PE per ES is the DF; the rest must report nonDF
        for tor in tors:
            role = "DF" if tor.name == df_node else "nonDF"
            test_fn = partial(check_df_role, tor, esi, role)
            _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
            assertmsg = '"{}" DF role incorrect'.format(tor.name)
            assert result is None, assertmsg

    # check roles on rack-1
    expect_df("torm11")

    # raising the preference on the current nonDF must flip the election
    torm12 = tgen.gears["torm12"]
    torm12.vtysh_cmd("conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000))

    # re-check roles on rack-1; we should have a new winner
    expect_df("torm12")
732
733
def check_protodown_rc(dut, protodown_rc):
    """
    check if specified protodown reason code is set

    Pass protodown_rc=None (or any falsy value) to require that no
    reason code at all is set.  Returns None on success.
    """
    evpn_js = json.loads(dut.vtysh_cmd("show evpn json"))
    reasons = evpn_js.get("protodownReasons", [])

    if protodown_rc:
        if protodown_rc not in reasons:
            return "protodown %s missing in %s" % (protodown_rc, reasons)
    elif reasons:
        return "unexpected protodown rc %s" % (reasons)

    return None
752
753
def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """
    tgen = get_topogen()

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    def set_uplinks(state):
        # both spine-facing interfaces together
        for eth in ("eth0", "eth1"):
            dut.run("ip link set %s-%s %s" % (dut_name, eth, state))

    def expect_rc(rc):
        test_fn = partial(check_protodown_rc, dut, rc)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
        assert result is None, assertmsg

    # wait for protodown rc to clear after startup
    expect_rc(None)

    # with all uplinks down the access ports must be protodowned
    set_uplinks("down")
    expect_rc("uplinkDown")

    # re-enabling the uplinks must clear the protodown state again
    set_uplinks("up")
    expect_rc(None)
792
793
if __name__ == "__main__":
    # Allow running this topotest directly; "-s" disables pytest output
    # capture so live daemon/topotest logging is visible.
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))