]> git.proxmox.com Git - mirror_frr.git/blob - tests/topotests/bgp_evpn_mh/test_evpn_mh.py
Merge pull request #9085 from mobash-rasool/pim-upst-4
[mirror_frr.git] / tests / topotests / bgp_evpn_mh / test_evpn_mh.py
1 #!/usr/bin/env python
2
3 #
4 # test_evpn_mh.py
5 #
6 # Copyright (c) 2020 by
7 # Cumulus Networks, Inc.
8 # Anuradha Karuppiah
9 #
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
13 # in all copies.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 # OF THIS SOFTWARE.
23 #
24
25 """
26 test_evpn_mh.py: Testing EVPN multihoming
27
28 """
29
30 import os
31 import re
32 import sys
33 import pytest
34 import json
35 import platform
36 from functools import partial
37
38 pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
39
40 # Save the Current Working Directory to find configuration files.
41 CWD = os.path.dirname(os.path.realpath(__file__))
42 sys.path.append(os.path.join(CWD, "../"))
43
44 # pylint: disable=C0413
45 # Import topogen and topotest helpers
46 from lib import topotest
47 from lib.topogen import Topogen, TopoRouter, get_topogen
48 from lib.topolog import logger
49
50 # Required to instantiate the topology builder class.
51 from mininet.topo import Topo
52
# NOTE: duplicate "pytestmark" assignment removed; the marks are already
# declared once near the top of the file, right after the stdlib imports.
54
55
56 #####################################################
57 ##
58 ## Network Topology Definition
59 ##
60 ## See topology picture at evpn-mh-topo-tests.pdf
61 #####################################################
62
63
class NetworkTopo(Topo):
    """
    EVPN Multihoming Topology -
    1. Two level CLOS
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2
    """

    def build(self, **_opts):
        """Create the routers, switches and links for the MH topology."""

        tgen = get_topogen(self)

        # two spines, four TORs (two per rack), four dual-attached hosts
        # (two per rack)
        for rname in (
            "spine1",
            "spine2",
            "torm11",
            "torm12",
            "torm21",
            "torm22",
            "hostd11",
            "hostd12",
            "hostd21",
            "hostd22",
        ):
            tgen.add_router(rname)

        # One switch (sw1..sw16) per point-to-point link. The order below
        # is significant: it fixes the ethX numbering on every node.
        #   spine1-eth0..eth3 <-> tormXY-eth0     (links 1-4)
        #   spine2-eth0..eth3 <-> tormXY-eth1     (links 5-8)
        #   tormXY-eth2/eth3  <-> hostdXY-eth0/1  (links 9-16)
        links = (
            ("spine1", "torm11"),
            ("spine1", "torm12"),
            ("spine1", "torm21"),
            ("spine1", "torm22"),
            ("spine2", "torm11"),
            ("spine2", "torm12"),
            ("spine2", "torm21"),
            ("spine2", "torm22"),
            ("torm11", "hostd11"),
            ("torm11", "hostd12"),
            ("torm12", "hostd11"),
            ("torm12", "hostd12"),
            ("torm21", "hostd21"),
            ("torm21", "hostd22"),
            ("torm22", "hostd21"),
            ("torm22", "hostd22"),
        )
        for swnum, (end1, end2) in enumerate(links, start=1):
            switch = tgen.add_switch("sw{}".format(swnum))
            switch.add_link(tgen.gears[end1])
            switch.add_link(tgen.gears[end2])
177
178
179 #####################################################
180 ##
181 ## Tests starting
182 ##
183 #####################################################
184
# loopback/VTEP IP of each TOR (the VxLAN tunnel source on that switch)
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# unique per-TOR primary IP on the VLAN-1000 SVI
# (45.0.0.1 is the shared anycast gateway, configured separately)
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

# rack membership maps; used to derive the expected ES peer list (same
# rack) and the expected remote VTEP list (other rack)
tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"}

tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"}

# ESI of the Ethernet Segment each dual-attached host hangs off
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
209
210
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Setup an 802.3ad (LACP) bond on a TOR or host for MH.

    node            - topogen gear to run the commands on
    bond_name       - name of the bond device to create
    bond_members    - list of member interface names to enslave
    bond_ad_sys_mac - LACP actor system MAC (shared by both TORs of an ES)
    br              - name of the bridge to attach the bond to, or
                      None/"" to skip the bridge and VLAN setup
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run(
        "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac)
    )

    # interfaces must be down before they can be enslaved
    for bond_member in bond_members:
        node.run("ip link set dev %s down" % bond_member)
        node.run("ip link set dev %s master %s" % (bond_member, bond_name))
        node.run("ip link set dev %s up" % bond_member)

    node.run("ip link set dev %s up" % bond_name)

    # if a bridge is specified add the bond as a bridge member and move it
    # from the default VLAN-1 to VLAN-1000 (untagged/PVID).
    # Fix: use the bridge name passed in via "br" instead of the previously
    # hard-coded "bridge" (all current callers pass "bridge", so behavior
    # is unchanged for them); also dropped a stray leading space.
    if br:
        node.run("ip link set dev %s master %s" % (bond_name, br))
        node.run("/sbin/bridge link set dev %s priority 8" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name)
239
240
def config_mcast_tunnel_termination_device(node):
    """
    Create the dummy device ("ipmr-lo") that the kernel needs to
    terminate VxLAN multicast tunnels when EVPN-PIM is used for
    flooded traffic.
    """
    for cmd in (
        "ip link add dev ipmr-lo type dummy",
        "ip link set dev ipmr-lo mtu 16000",
        "ip link set dev ipmr-lo mode dormant",
        "ip link set dev ipmr-lo up",
    ):
        node.run(cmd)
250
251
def config_bridge(node):
    """
    Create a VLAN-aware bridge named "bridge" and allow VLAN-1000 on it.
    """
    for cmd in (
        "ip link add dev bridge type bridge stp_state 0",
        "ip link set dev bridge type bridge vlan_filtering 1",
        "ip link set dev bridge mtu 9216",
        "ip link set dev bridge type bridge ageing_time 1800",
        "ip link set dev bridge type bridge mcast_snooping 0",
        "ip link set dev bridge type bridge vlan_stats_enabled 1",
        "ip link set dev bridge up",
        "/sbin/bridge vlan add vid 1000 dev bridge",
    ):
        node.run(cmd)
264
265
def config_vxlan(node, node_ip):
    """
    Create a VxLAN device for VNI 1000, enslave it to the bridge and map
    VLAN-1000 to VNI-1000. node_ip is the local VTEP/tunnel source IP.
    """
    for cmd in (
        # device setup; BUM traffic uses the 239.1.1.100 multicast group
        # terminated on ipmr-lo
        "ip link add dev vx-1000 type vxlan id 1000 dstport 4789",
        "ip link set dev vx-1000 type vxlan nolearning",
        "ip link set dev vx-1000 type vxlan local %s" % node_ip,
        "ip link set dev vx-1000 type vxlan ttl 64",
        "ip link set dev vx-1000 mtu 9152",
        "ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100",
        "ip link set dev vx-1000 up",
        # bridge attrs
        "ip link set dev vx-1000 master bridge",
        "/sbin/bridge link set dev vx-1000 neigh_suppress on",
        "/sbin/bridge link set dev vx-1000 learning off",
        "/sbin/bridge link set dev vx-1000 priority 8",
        "/sbin/bridge vlan del vid 1 dev vx-1000",
        "/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000",
        "/sbin/bridge vlan add vid 1000 dev vx-1000",
        "/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000",
    ):
        node.run(cmd)
288
289
def config_svi(node, svi_pip):
    """
    Create the VLAN-1000 SVI: a per-TOR primary IP (svi_pip) on vlan1000
    plus the anycast gateway 45.0.0.1 on a macvlan sub-device with the
    VRRP-style MAC 00:00:5e:00:01:01.
    """
    for cmd in (
        "ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q",
        "ip addr add %s/24 dev vlan1000" % svi_pip,
        "ip link set dev vlan1000 up",
        "/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1",
        "ip link add link vlan1000 name vlan1000-v0 type macvlan mode private",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0",
        # NOTE(review): the next command only reads dad_transmits before the
        # following one sets it to 0 -- presumably a leftover; confirm before
        # removing
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0",
        "ip link set dev vlan1000-v0 address 00:00:5e:00:01:01",
        "ip link set dev vlan1000-v0 up",
        # metric 1024 is not working
        "ip addr add 45.0.0.1/24 dev vlan1000-v0",
    ):
        node.run(cmd)
306
307
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Build the full TOR datapath (the TOR acts as VTEP and EVPN-PE):
    multicast tunnel termination device, VLAN-aware bridge, VxLAN
    device, the two dual-attached host bonds and the SVI.
    """
    config_mcast_tunnel_termination_device(tor)
    config_bridge(tor)
    config_vxlan(tor, tor_ip)

    # Both TORs of a rack advertise the same LACP system MAC so the host
    # sees a single 802.3ad partner; rack-1 and rack-2 use different MACs.
    sys_mac = "44:38:39:ff:ff:01" if "torm1" in tor_name else "44:38:39:ff:ff:02"

    # hostbond1/hostbond2 front the rack's two hosts via eth2/eth3
    for bond, suffix in (("hostbond1", "-eth2"), ("hostbond2", "-eth3")):
        config_bond(tor, bond, [tor_name + suffix], sys_mac, "bridge")

    config_svi(tor, svi_pip)
334
335
def config_tors(tgen, tors):
    """Apply the TOR datapath configuration to every switch in tors."""
    for name in tors:
        config_tor(name, tgen.gears[name], tor_ips.get(name), svi_ips.get(name))
340
341
def compute_host_ip_mac(host_name):
    """
    Derive the host's addresses from the numeric suffix of its name,
    e.g. "hostd11" -> ("45.0.0.11/24", "00:00:00:00:00:11").
    """
    host_id = host_name.split("hostd")[1]
    return "45.0.0.{}/24".format(host_id), "00:00:00:00:00:{}".format(host_id)
348
349
def config_host(host_name, host):
    """
    Create the dual-attached bond ("torbond") on a host node and assign
    its derived IP and MAC to the bond.
    """
    bond_name = "torbond"
    members = [host_name + "-eth0", host_name + "-eth1"]
    # hosts use the default all-zero actor system MAC and no bridge
    config_bond(host, bond_name, members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
363
364
def config_hosts(tgen, hosts):
    """Apply the dual-attached bond configuration to every host in hosts."""
    for name in hosts:
        config_host(name, tgen.gears[name])
369
370
def setup_module(module):
    """Start the topology, configure the datapath and launch the daemons."""
    tgen = Topogen(NetworkTopo, module.__name__)
    tgen.start_topology()

    # EVPN-MH needs kernel features that only exist from 4.19 on
    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    config_tors(tgen, ["torm11", "torm12", "torm21", "torm22"])
    config_hosts(tgen, ["hostd11", "hostd12", "hostd21", "hostd22"])

    # tgen.mininet_cli()
    # load the per-router daemon configs: zebra, pim and bgp (evpn)
    for rname, router in tgen.routers().items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_PIM, os.path.join(CWD, "{}/pim.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/evpn.conf".format(rname))
        )
    tgen.start_router()
    # tgen.mininet_cli()
410
411
def teardown_module(_mod):
    """Teardown the pytest environment by stopping the whole topology."""
    get_topogen().stop_topology()
418
419
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check that a local ES lists exactly the expected peer VTEPs.
    Returns None on success, (esi, set-difference) on mismatch.
    """
    # the expected peer is the other TOR in the same rack as the dut
    rack = tor_ips_rack_1 if "torm1" in dut_name else tor_ips_rack_2
    expected = {ip for name, ip in rack.items() if dut_name not in name}

    # VTEPs known to be down must not appear in the peer list
    expected -= set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
443
444
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check that a remote ES lists exactly the other rack's VTEPs.
    Returns None on success, (esi, set-difference) on mismatch.
    """
    # remote ESs live on the TORs of the other rack
    rack = tor_ips_rack_2 if "torm1" in dut_name else tor_ips_rack_1
    expected = set(rack.values())

    # VTEPs known to be down must not appear in the list
    expected -= set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))
    return (esi, diff) if diff else None
468
469
def check_es(dut):
    """
    Verify the PE/VTEP list of every ES on the dut, local and remote,
    and that all expected ESs are present. Returns None on success.
    """
    # NOTE: "l2vp" relies on vtysh accepting the abbreviated "l2vpn" token
    es_list = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es json"))

    expected_es_set = set(host_es_map.values())
    curr_esis = []

    # verify the content of each ES that is present
    for es in es_list:
        esi = es["esi"]
        curr_esis.append(esi)
        vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

        if "local" in es["type"]:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # verify that every expected ES (and no extra one) showed up
    diff = set(curr_esis).symmetric_difference(expected_es_set)
    return diff if diff else None
504
505
def check_one_es(dut, esi, down_vteps):
    """
    Verify the VTEP list of a single ES (local or remote) on the dut,
    excluding VTEPs listed in down_vteps. Returns None on success.
    """
    es = json.loads(dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi))

    if not es:
        return "esi %s not found" % esi

    esi = es["esi"]
    vtep_ips = [vtep["vtep_ip"] for vtep in es.get("vteps", [])]

    if "local" in es["type"]:
        return check_local_es(esi, vtep_ips, dut.name, down_vteps)
    return check_remote_es(esi, vtep_ips, dut.name, down_vteps)
528
529
def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    # poll until the ES content converges or we give up
    _, result = topotest.run_and_expect(
        partial(check_es, dut), None, count=20, wait=3
    )
    assert result is None, '"{}" ES content incorrect'.format(dut_name)
    # tgen.mininet_cli()
550
551
def test_evpn_ead_update():
    """
    Flap a host link on the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI.

    The dut (torm11) is on rack-1; the flapped access link is on the
    rack-2 TOR torm21, so the dut learns about it purely via Type-1
    EAD route updates/withdraws.
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # dut on rack1 and host link flap on rack2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]

    # NOTE(review): "host" is looked up but never used; the link is
    # flapped on the TOR instead (see the LACP note below)
    host_name = "hostd21"
    host = tgen.gears[host_name]
    esi = host_es_map.get(host_name)

    # check if the VTEP list is right to start with.
    # test_fn captures a reference to down_vteps, so mutating the list
    # below changes the expected VTEP set for the later run_and_expect
    # calls without rebuilding the partial.
    down_vteps = []
    test_fn = partial(check_one_es, dut, esi, down_vteps)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    down_vteps.append(tor_ips.get(remote_tor_name))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link down'.format(dut_name)
    assert result is None, assertmsg

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(tor_ips.get(remote_tor_name))
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link flap'.format(dut_name)
    assert result is None, assertmsg

    # tgen.mininet_cli()
597
598
def ping_anycast_gw(tgen):
    """
    Broadcast an ARP request for the anycast gateway from a local and a
    remote host so the host MACs get populated on the PEs.
    """
    script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
    intf = "torbond"
    ipaddr = "45.0.0.1"
    arping_cmd = [
        script_path,
        "--imports=Ether,ARP",
        "--interface=" + intf,
        "'Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=\"{}\")'".format(ipaddr),
    ]
    for name in ("hostd11", "hostd21"):
        host = tgen.net[name]
        output = host.cmd(arping_cmd).strip()
        if output:
            host.logger.debug(
                "%s: arping on %s for %s returned: %s", name, intf, ipaddr, output
            )
617
618
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    Check if the MAC is present in the VNI and its destination matches
    the expected type, ESI and (for local MACs) access interface.
    Returns None on success, an error string otherwise.

    dut     - router to query
    vni     - VNI to look the MAC up in
    mac     - MAC address to check
    m_type  - expected type, "local" or "remote"
    esi     - expected ESI
    intf    - expected access interface ("" for remote MACs)
    ping_gw - if True, ARP the anycast GW first to populate the MAC
    tgen    - topogen instance; only needed when ping_gw is set
    """

    if ping_gw:
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    # Bugfixes: the loop variable used to shadow the "mac" parameter, and
    # the final condition compared "intf == intf" (always true) instead of
    # checking the entry's interface against the expected one.
    for _mac_entry, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        # only local MACs carry a meaningful access interface
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
638
639
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """

    tgen = get_topogen()

    # consistency fix: skip like the sibling tests do when an earlier
    # test already left a router in a failed state
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]
    tors = [tgen.gears["torm11"], tgen.gears["torm12"]]

    vni = 1000

    # check if the rack-1 host MAC is present on all rack-1 PEs
    # and points to local access port; ping_gw=True populates the MAC first
    m_type = "local"
    _, mac = compute_host_ip_mac(local_host.name)
    esi = host_es_map.get(local_host.name)
    intf = "hostbond1"

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf, True, tgen)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" local MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg

    # check if the rack-2 host MAC is present on all rack-1 PEs
    # and points to the remote ES destination (no access interface)
    m_type = "remote"
    _, mac = compute_host_ip_mac(remote_host.name)
    esi = host_es_map.get(remote_host.name)
    intf = ""

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" remote MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg
683
684
def check_df_role(dut, esi, role):
    """
    Return an error string if the DF role on the dut differs from the
    expected role ("DF" or "nonDF"); None if it matches.
    """
    es = json.loads(dut.vtysh_cmd("show evpn es %s json" % esi))

    if not es:
        return "esi %s not found" % esi

    # only the nonDF is flagged; absence of the flag means DF
    curr_role = "nonDF" if "nonDF" in es.get("flags", []) else "DF"
    if curr_role == role:
        return None
    return "%s is %s for %s" % (dut.name, curr_role, esi)
702
703
def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the DF winner.
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    intf = "hostbond1"
    tors = [tgen.gears["torm11"], tgen.gears["torm12"]]

    def check_rack1_roles(df_node):
        # the named TOR must be DF, the other one nonDF
        for tor in tors:
            expected = "DF" if tor.name == df_node else "nonDF"
            _, result = topotest.run_and_expect(
                partial(check_df_role, tor, esi, expected), None, count=20, wait=3
            )
            assert result is None, '"{}" DF role incorrect'.format(tor.name)

    # check roles on rack-1; torm11 starts out as the DF
    check_rack1_roles("torm11")

    # change df preference on the nonDF to make it the df
    tgen.gears["torm12"].vtysh_cmd(
        "conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000)
    )

    # re-check roles on rack-1; we should have a new winner
    check_rack1_roles("torm12")

    # tgen.mininet_cli()
747
748
def check_protodown_rc(dut, protodown_rc):
    """
    Check the protodown reason codes on the dut. When protodown_rc is
    set, the code must be present; when it is None, no reason code may
    be set at all. Returns None on success, an error string otherwise.
    """
    evpn_js = json.loads(dut.vtysh_cmd("show evpn json"))
    curr_rc = evpn_js.get("protodownReasons", [])

    if protodown_rc:
        if protodown_rc not in curr_rc:
            return "protodown %s missing in %s" % (protodown_rc, curr_rc)
        return None

    if curr_rc:
        return "unexpected protodown rc %s" % (curr_rc)
    return None
767
768
def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """

    tgen = get_topogen()

    # consistency fix: skip like the sibling tests do when an earlier
    # test already left a router in a failed state
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    # wait for protodown rc to clear after startup
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # disable the uplinks (both spine-facing links)
    dut.run("ip link set %s-eth0 down" % dut_name)
    dut.run("ip link set %s-eth1 down" % dut_name)

    # check if the access ports have been protodowned
    test_fn = partial(check_protodown_rc, dut, "uplinkDown")
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # enable the uplinks
    dut.run("ip link set %s-eth0 up" % dut_name)
    dut.run("ip link set %s-eth1 up" % dut_name)

    # check if the access ports have been moved out of protodown
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg
807
808
if __name__ == "__main__":
    # run the file directly: pass CLI args through, disable output capture
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))