]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/bgp_evpn_mh/test_evpn_mh.py
Merge pull request #8304 from mjstapp/fix_zmq_xref
[mirror_frr.git] / tests / topotests / bgp_evpn_mh / test_evpn_mh.py
1 #!/usr/bin/env python
2
3 #
4 # test_evpn_mh.py
5 #
6 # Copyright (c) 2020 by
7 # Cumulus Networks, Inc.
8 # Anuradha Karuppiah
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 """
26 test_evpn_mh.py: Testing EVPN multihoming
27
28 """
29
30 import os
31 import re
32 import sys
33 import pytest
34 import json
35 import platform
36 from functools import partial
37
38 pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
39
40 # Save the Current Working Directory to find configuration files.
41 CWD = os.path.dirname(os.path.realpath(__file__))
42 sys.path.append(os.path.join(CWD, "../"))
43
44 # pylint: disable=C0413
45 # Import topogen and topotest helpers
46 from lib import topotest
47 from lib.topogen import Topogen, TopoRouter, get_topogen
48 from lib.topolog import logger
49
50 # Required to instantiate the topology builder class.
51 from mininet.topo import Topo
52
53 #####################################################
54 ##
55 ## Network Topology Definition
56 ##
57 ## See topology picture at evpn-mh-topo-tests.pdf
58 #####################################################
59
60
class NetworkTopo(Topo):
    """
    EVPN Multihoming Topology -
    1. Two level CLOS
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2
    """

    def build(self, **_opts):
        "Build function"

        tgen = get_topogen(self)

        for rtr in (
            "spine1",
            "spine2",
            "torm11",
            "torm12",
            "torm21",
            "torm22",
            "hostd11",
            "hostd12",
            "hostd21",
            "hostd22",
        ):
            tgen.add_router(rtr)

        # Each entry wires one p2p link through a dedicated switch.
        # The order below is significant: it determines the ethX
        # interface numbering on every node (see evpn-mh-topo-tests.pdf).
        links = [
            # spine1 uplinks: spine1-eth0..eth3 <-> tormXY-eth0
            ("sw1", "spine1", "torm11"),
            ("sw2", "spine1", "torm12"),
            ("sw3", "spine1", "torm21"),
            ("sw4", "spine1", "torm22"),
            # spine2 uplinks: spine2-eth0..eth3 <-> tormXY-eth1
            ("sw5", "spine2", "torm11"),
            ("sw6", "spine2", "torm12"),
            ("sw7", "spine2", "torm21"),
            ("sw8", "spine2", "torm22"),
            # torm11 host links: torm11-eth2/eth3 <-> hostd1X-eth0
            ("sw9", "torm11", "hostd11"),
            ("sw10", "torm11", "hostd12"),
            # torm12 host links: torm12-eth2/eth3 <-> hostd1X-eth1
            ("sw11", "torm12", "hostd11"),
            ("sw12", "torm12", "hostd12"),
            # torm21 host links: torm21-eth2/eth3 <-> hostd2X-eth0
            ("sw13", "torm21", "hostd21"),
            ("sw14", "torm21", "hostd22"),
            # torm22 host links: torm22-eth2/eth3 <-> hostd2X-eth1
            ("sw15", "torm22", "hostd21"),
            ("sw16", "torm22", "hostd22"),
        ]

        for sw_name, end_a, end_b in links:
            switch = tgen.add_switch(sw_name)
            switch.add_link(tgen.gears[end_a])
            switch.add_link(tgen.gears[end_b])
175
176 #####################################################
177 ##
178 ## Tests starting
179 ##
180 #####################################################
181
# Loopback/VTEP source address of every ToR
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# Unique SVI address of every ToR on the 45.0.0.0/24 host subnet
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

# Per-rack views of tor_ips; rack-N ToRs are named "tormN*"
tor_ips_rack_1 = {name: ip for name, ip in tor_ips.items() if name.startswith("torm1")}

tor_ips_rack_2 = {name: ip for name, ip in tor_ips.items() if name.startswith("torm2")}

# ESI (type-3) assigned to the bond attaching each dual-homed host
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
206
207
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Used to setup bonds on the TORs and hosts for MH.

    Creates an 802.3ad (LACP) bond named bond_name, enslaves the given
    member links and, when a bridge name is passed via ``br``, attaches
    the bond to that bridge on VLAN 1000 (untagged/PVID).

    node            -- topotest node to run the commands on
    bond_name       -- name of the bond device to create
    bond_members    -- list of interface names to enslave
    bond_ad_sys_mac -- LACP actor system MAC (shared by ES peers)
    br              -- bridge device name, or None/"" to skip bridging
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run(
        "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac)
    )

    # members must be down before they can be enslaved
    for bond_member in bond_members:
        node.run("ip link set dev %s down" % bond_member)
        node.run("ip link set dev %s master %s" % (bond_member, bond_name))
        node.run("ip link set dev %s up" % bond_member)

    node.run("ip link set dev %s up" % bond_name)

    # if bridge is specified add the bond as a bridge member
    # (fix: use the caller-supplied bridge name instead of the
    # previously hardcoded "bridge")
    if br:
        node.run("ip link set dev %s master %s" % (bond_name, br))
        node.run("/sbin/bridge link set dev %s priority 8" % bond_name)
        # move the port from the default VLAN 1 to VLAN 1000
        node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name)
236
237
def config_mcast_tunnel_termination_device(node):
    """
    The kernel requires a device to terminate VxLAN multicast tunnels
    when EVPN-PIM is used for flooded traffic
    """
    for cmd in (
        "ip link add dev ipmr-lo type dummy",
        "ip link set dev ipmr-lo mtu 16000",
        "ip link set dev ipmr-lo mode dormant",
        "ip link set dev ipmr-lo up",
    ):
        node.run(cmd)
247
248
def config_bridge(node):
    """
    Create a VLAN aware bridge
    """
    bridge_cmds = [
        "ip link add dev bridge type bridge stp_state 0",
        "ip link set dev bridge type bridge vlan_filtering 1",
        "ip link set dev bridge mtu 9216",
        "ip link set dev bridge type bridge ageing_time 1800",
        "ip link set dev bridge type bridge mcast_snooping 0",
        "ip link set dev bridge type bridge vlan_stats_enabled 1",
        "ip link set dev bridge up",
        "/sbin/bridge vlan add vid 1000 dev bridge",
    ]
    for cmd in bridge_cmds:
        node.run(cmd)
261
262
def config_vxlan(node, node_ip):
    """
    Create a VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000.
    """
    vxlan_cmds = [
        "ip link add dev vx-1000 type vxlan id 1000 dstport 4789",
        "ip link set dev vx-1000 type vxlan nolearning",
        "ip link set dev vx-1000 type vxlan local %s" % node_ip,
        "ip link set dev vx-1000 type vxlan ttl 64",
        "ip link set dev vx-1000 mtu 9152",
        "ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100",
        "ip link set dev vx-1000 up",
        # bridge attrs
        "ip link set dev vx-1000 master bridge",
        "/sbin/bridge link set dev vx-1000 neigh_suppress on",
        "/sbin/bridge link set dev vx-1000 learning off",
        "/sbin/bridge link set dev vx-1000 priority 8",
        "/sbin/bridge vlan del vid 1 dev vx-1000",
        "/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000",
        "/sbin/bridge vlan add vid 1000 dev vx-1000",
        "/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000",
    ]
    for cmd in vxlan_cmds:
        node.run(cmd)
285
286
def config_svi(node, svi_pip):
    """
    Create an SVI for VLAN 1000
    """
    svi_cmds = [
        "ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q",
        "ip addr add %s/24 dev vlan1000" % svi_pip,
        "ip link set dev vlan1000 up",
        "/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1",
        # anycast-GW macvlan on top of the SVI
        "ip link add link vlan1000 name vlan1000-v0 type macvlan mode private",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0",
        "ip link set dev vlan1000-v0 address 00:00:5e:00:01:01",
        "ip link set dev vlan1000-v0 up",
        # metric 1024 is not working
        "ip addr add 45.0.0.1/24 dev vlan1000-v0",
    ]
    for cmd in svi_cmds:
        node.run(cmd)
303
304
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Create the bond/vxlan-bridge on the TOR which acts as VTEP and EVPN-PE
    """
    # device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

    # VLAN-aware bridge
    config_bridge(tor)

    # VxLAN device for VNI 1000, enslaved to the bridge
    config_vxlan(tor, tor_ip)

    # rack-1 and rack-2 ToR pairs share distinct LACP system MACs so
    # each host bond forms one ES per rack
    sys_mac = "44:38:39:ff:ff:01" if "torm1" in tor_name else "44:38:39:ff:ff:02"

    # host-facing bonds, added to the bridge
    config_bond(tor, "hostbond1", [tor_name + "-eth2"], sys_mac, "bridge")
    config_bond(tor, "hostbond2", [tor_name + "-eth3"], sys_mac, "bridge")

    # SVI / anycast gateway
    config_svi(tor, svi_pip)
331
332
def config_tors(tgen, tors):
    # Push the full VTEP/EVPN-PE config to every listed ToR
    for name in tors:
        config_tor(name, tgen.gears[name], tor_ips.get(name), svi_ips.get(name))
337
338
def compute_host_ip_mac(host_name):
    """
    Derive a host's address pair from its name "hostdNN":
    returns ("45.0.0.NN/24", "00:00:00:00:00:NN").
    """
    suffix = host_name.partition("hostd")[2]
    return "45.0.0.%s/24" % suffix, "00:00:00:00:00:%s" % suffix
345
346
def config_host(host_name, host):
    """
    Create the dual-attached bond on host nodes for MH
    """
    bond_name = "torbond"
    members = [host_name + "-eth0", host_name + "-eth1"]
    # hosts do not set an actor system MAC (all-zero) and are not bridged
    config_bond(host, bond_name, members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
360
361
def config_hosts(tgen, hosts):
    # Apply the MH bond configuration on every listed host
    for name in hosts:
        config_host(name, tgen.gears[name])
366
367
def setup_module(module):
    "Setup topology"
    tgen = Topogen(NetworkTopo, module.__name__)
    tgen.start_topology()

    # EVPN-MH relies on kernel features introduced in 4.19
    if topotest.version_cmp(platform.release(), "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    config_tors(tgen, ["torm11", "torm12", "torm21", "torm22"])
    config_hosts(tgen, ["hostd11", "hostd12", "hostd21", "hostd22"])

    # tgen.mininet_cli()
    # Load the per-router daemon configuration (zebra, pim, bgp)
    for rname, router in tgen.routers().items():
        for daemon, conf_file in (
            (TopoRouter.RD_ZEBRA, "zebra.conf"),
            (TopoRouter.RD_PIM, "pim.conf"),
            (TopoRouter.RD_BGP, "evpn.conf"),
        ):
            router.load_config(daemon, os.path.join(CWD, rname, conf_file))
    tgen.start_router()
    # tgen.mininet_cli()
407
408
def teardown_module(_mod):
    "Teardown the pytest environment"
    # Tear down the whole topology built in setup_module
    get_topogen().stop_topology()
415
416
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check if ES peers are setup correctly on local ESs.

    The expected peer set is every other ToR in the DUT's rack, minus
    the VTEPs listed as down. Returns (esi, diff) on mismatch, else None.
    """
    rack = tor_ips_rack_1 if "torm1" in dut_name else tor_ips_rack_2

    # peers are all rack members except the DUT itself
    expected = {ip for name, ip in rack.items() if dut_name not in name}

    # remove down VTEPs from the peer check list
    expected -= set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
440
441
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify list of PEs associated with a remote ES.

    The expected set is every ToR in the *other* rack, minus the VTEPs
    listed as down. Returns (esi, diff) on mismatch, else None.
    """
    rack = tor_ips_rack_2 if "torm1" in dut_name else tor_ips_rack_1

    # remove down VTEPs from the remote check list
    expected = set(rack.values()) - set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
465
466
def check_es(dut):
    """
    Verify the list of PEs associated with all ESs, local and remote.

    Returns None on success. On failure returns either the (esi, diff)
    tuple from a per-ES check or the set of missing/unexpected ESIs.
    """
    bgp_es_json = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es json"))

    expected_es_set = set(host_es_map.values())
    curr_es_set = set()

    # check if each ES's content (VTEP list) is correct
    for es in bgp_es_json:
        esi = es["esi"]
        curr_es_set.add(esi)
        vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

        if "local" in es["type"]:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # check if all expected ESs (and only those) are present
    result = curr_es_set.symmetric_difference(expected_es_set)

    return result if result else None
501
502
def check_one_es(dut, esi, down_vteps):
    """
    Verify the PE list associated with a single ES, local or remote.
    """
    es = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi))

    if not es:
        return "esi %s not found" % esi

    esi = es["esi"]
    vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

    if "local" in es["type"]:
        return check_local_es(esi, vtep_ips, dut.name, down_vteps)
    return check_remote_es(esi, vtep_ips, dut.name, down_vteps)
525
526
def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    probe = partial(check_es, tgen.gears[dut_name])
    _, result = topotest.run_and_expect(probe, None, count=20, wait=3)

    assert result is None, '"{}" ES content incorrect'.format(dut_name)
    # tgen.mininet_cli()
547
548
def test_evpn_ead_update():
    """
    Flap a host link on the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI.
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # the DUT is on rack-1 while the host link flaps on rack-2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]
    remote_vtep = tor_ips.get(remote_tor_name)

    esi = host_es_map.get("hostd21")

    # the same probe is reused below; down_vteps is mutated in place
    down_vteps = []
    probe = partial(check_one_es, dut, esi, down_vteps)

    # check if the VTEP list is right to start with
    _, result = topotest.run_and_expect(probe, None, count=20, wait=3)
    assert result is None, '"{}" ES content incorrect'.format(dut_name)

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-eth2 down" % remote_tor_name)
    down_vteps.append(remote_vtep)
    _, result = topotest.run_and_expect(probe, None, count=20, wait=3)
    assert result is None, '"{}" ES incorrect after remote link down'.format(dut_name)

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(remote_vtep)
    remote_tor.run("ip link set dev %s-eth2 up" % remote_tor_name)
    _, result = topotest.run_and_expect(probe, None, count=20, wait=3)
    assert result is None, '"{}" ES incorrect after remote link flap'.format(dut_name)

    # tgen.mininet_cli()
594
595
def ping_anycast_gw(tgen):
    # ping the anycast gw from the local and remote hosts to populate
    # the mac address on the PEs
    cmd_str = "arping -I torbond -c 1 45.0.0.1"
    for host_name in ("hostd11", "hostd21"):
        tgen.gears[host_name].run(cmd_str)
605
606
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    Check if the MAC is present in the VNI and that its destination
    (type, ESI and, for local MACs, access interface) matches the
    expected values. Returns None on success, an error string otherwise.
    """

    if ping_gw:
        # populate the MAC on the PEs before looking it up
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    # use a distinct loop name so the `mac` parameter (needed for the
    # error message below) is not shadowed
    for _mac_key, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        # intf is only meaningful for locally attached MACs
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        # bug fix: compare the reported interface with the expected one;
        # the original compared intf against itself (always true)
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
626
627
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """

    tgen = get_topogen()

    rack1_tors = [tgen.gears["torm11"], tgen.gears["torm12"]]
    vni = 1000

    # (host, expected MAC type on rack-1 PEs, expected access interface)
    checks = [
        (tgen.gears["hostd11"], "local", "hostbond1"),
        (tgen.gears["hostd21"], "remote", ""),
    ]

    for host, m_type, intf in checks:
        _, mac = compute_host_ip_mac(host.name)
        esi = host_es_map.get(host.name)
        # only the local-MAC pass primes the PEs by pinging the anycast GW
        ping_gw = m_type == "local"

        for tor in rack1_tors:
            probe = partial(
                check_mac,
                tor,
                vni,
                mac,
                m_type,
                esi,
                intf,
                ping_gw,
                tgen if ping_gw else None,
            )
            _, result = topotest.run_and_expect(probe, None, count=20, wait=3)
            assertmsg = '"{}" {} MAC content incorrect'.format(tor.name, m_type)
            assert result is None, assertmsg
671
672
def check_df_role(dut, esi, role):
    """
    Return error string if the df role on the dut is different
    """
    es = json.loads(dut.vtysh_cmd("show evpn es %s json" % esi))

    if not es:
        return "esi %s not found" % esi

    # the DF carries no "nonDF" flag
    curr_role = "nonDF" if "nonDF" in es.get("flags", []) else "DF"

    if curr_role == role:
        return None
    return "%s is %s for %s" % (dut.name, curr_role, esi)
690
691
def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the DF winner.
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    intf = "hostbond1"

    rack1_tors = [tgen.gears["torm11"], tgen.gears["torm12"]]

    def expect_df_roles(df_name):
        # the named ToR must be DF; every other rack-1 ToR must be nonDF
        for tor in rack1_tors:
            expected = "DF" if tor.name == df_name else "nonDF"
            probe = partial(check_df_role, tor, esi, expected)
            _, result = topotest.run_and_expect(probe, None, count=20, wait=3)
            assert result is None, '"{}" DF role incorrect'.format(tor.name)

    # check roles on rack-1
    expect_df_roles("torm11")

    # change df preference on the nonDF to make it the df
    tgen.gears["torm12"].vtysh_cmd(
        "conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000)
    )

    # re-check roles on rack-1; we should have a new winner
    expect_df_roles("torm12")

    # tgen.mininet_cli()
735
736
def check_protodown_rc(dut, protodown_rc):
    """
    check if specified protodown reason code is set
    """
    reasons = json.loads(dut.vtysh_cmd("show evpn json")).get("protodownReasons", [])

    if protodown_rc:
        # the given reason must be present
        if protodown_rc not in reasons:
            return "protodown %s missing in %s" % (protodown_rc, reasons)
    elif reasons:
        # no reason expected at all
        return "unexpected protodown rc %s" % (reasons)

    return None
755
756
def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """

    tgen = get_topogen()

    dut_name = "torm11"
    dut = tgen.gears[dut_name]
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)

    def expect_rc(rc):
        probe = partial(check_protodown_rc, dut, rc)
        _, result = topotest.run_and_expect(probe, None, count=20, wait=3)
        assert result is None, assertmsg

    def set_uplinks(state):
        for eth in ("eth0", "eth1"):
            dut.run("ip link set %s-%s %s" % (dut_name, eth, state))

    # wait for protodown rc to clear after startup
    expect_rc(None)

    # disable the uplinks; the access ports must be protodowned
    set_uplinks("down")
    expect_rc("uplinkDown")

    # enable the uplinks; the access ports must leave protodown
    set_uplinks("up")
    expect_rc(None)
795
796
if __name__ == "__main__":
    # Allow running this topotest directly; -s streams live output
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))