#!/usr/bin/env python
# tests/topotests/bgp_evpn_mh/test_evpn_mh.py
6 # Copyright (c) 2020 by
7 # Cumulus Networks, Inc.
10 # Permission to use, copy, modify, and/or distribute this software
11 # for any purpose with or without fee is hereby granted, provided
12 # that the above copyright notice and this permission notice appear
15 # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
16 # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
17 # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
18 # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
19 # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
20 # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
21 # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
26 test_evpn_mh.py: Testing EVPN multihoming
import json
import os
import platform
import subprocess
import sys
from functools import partial

import pytest

# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))

# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest

# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen

pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
55 #####################################################
57 ## Network Topology Definition
59 ## See topology picture at evpn-mh-topo-tests.pdf
60 #####################################################
def build_topo(tgen):
    """
    EVPN Multihoming Topology -
    1. Two spine switches - spine1, spine2
    2. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    3. Two dual attached hosts per-rack - hostdx1, hostdx2
    """

    # All routers in the topology: spines, TORs and dual-attached hosts.
    routers = (
        "spine1",
        "spine2",
        "torm11",
        "torm12",
        "torm21",
        "torm22",
        "hostd11",
        "hostd12",
        "hostd21",
        "hostd22",
    )
    for rtr in routers:
        tgen.add_router(rtr)

    # Helper: create a p2p switch between two gears.
    # First switch is for a dummy interface (for local network)
    def p2p(sw_name, gear_a, gear_b):
        sw = tgen.add_switch(sw_name)
        sw.add_link(tgen.gears[gear_a])
        sw.add_link(tgen.gears[gear_b])

    ##################### spine1 ########################
    # spine1-eth0 is connected to torm11-eth0
    p2p("sw1", "spine1", "torm11")
    # spine1-eth1 is connected to torm12-eth0
    p2p("sw2", "spine1", "torm12")
    # spine1-eth2 is connected to torm21-eth0
    p2p("sw3", "spine1", "torm21")
    # spine1-eth3 is connected to torm22-eth0
    p2p("sw4", "spine1", "torm22")

    ##################### spine2 ########################
    # spine2-eth0 is connected to torm11-eth1
    p2p("sw5", "spine2", "torm11")
    # spine2-eth1 is connected to torm12-eth1
    p2p("sw6", "spine2", "torm12")
    # spine2-eth2 is connected to torm21-eth1
    p2p("sw7", "spine2", "torm21")
    # spine2-eth3 is connected to torm22-eth1
    p2p("sw8", "spine2", "torm22")

    ##################### torm11 ########################
    # torm11-eth2 is connected to hostd11-eth0
    p2p("sw9", "torm11", "hostd11")
    # torm11-eth3 is connected to hostd12-eth0
    p2p("sw10", "torm11", "hostd12")

    ##################### torm12 ########################
    # torm12-eth2 is connected to hostd11-eth1
    p2p("sw11", "torm12", "hostd11")
    # torm12-eth3 is connected to hostd12-eth1
    p2p("sw12", "torm12", "hostd12")

    ##################### torm21 ########################
    # torm21-eth2 is connected to hostd21-eth0
    p2p("sw13", "torm21", "hostd21")
    # torm21-eth3 is connected to hostd22-eth0
    p2p("sw14", "torm21", "hostd22")

    ##################### torm22 ########################
    # torm22-eth2 is connected to hostd21-eth1
    p2p("sw15", "torm22", "hostd21")
    # torm22-eth3 is connected to hostd22-eth1
    p2p("sw16", "torm22", "hostd22")
173 #####################################################
177 #####################################################
# VTEP (tunnel endpoint) IPs for each TOR.
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# Per-TOR primary IP used on the VLAN-1000 SVI.
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

# Rack membership of the TOR VTEPs, used to compute expected ES peers.
tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"}

tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"}

# Expected ESI (type-3, LACP system-mac derived) per dual-attached host.
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Used to setup bonds on the TORs and hosts for MH
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run(
        "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac)
    )

    # enslave the members; a member must be down before it can be enslaved
    for bond_member in bond_members:
        node.run("ip link set dev %s down" % bond_member)
        node.run("ip link set dev %s master %s" % (bond_member, bond_name))
        node.run("ip link set dev %s up" % bond_member)

    node.run("ip link set dev %s up" % bond_name)

    # if bridge is specified add the bond as a bridge member
    if br:
        node.run(" ip link set dev %s master bridge" % bond_name)
        node.run("/sbin/bridge link set dev %s priority 8" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name)
def config_mcast_tunnel_termination_device(node):
    """
    The kernel requires a device to terminate VxLAN multicast tunnels
    when EVPN-PIM is used for flooded traffic
    """
    node.run("ip link add dev ipmr-lo type dummy")
    node.run("ip link set dev ipmr-lo mtu 16000")
    node.run("ip link set dev ipmr-lo mode dormant")
    node.run("ip link set dev ipmr-lo up")
def config_bridge(node):
    """
    Create a VLAN aware bridge
    """
    node.run("ip link add dev bridge type bridge stp_state 0")
    node.run("ip link set dev bridge type bridge vlan_filtering 1")
    node.run("ip link set dev bridge mtu 9216")
    node.run("ip link set dev bridge type bridge ageing_time 1800")
    node.run("ip link set dev bridge type bridge mcast_snooping 0")
    node.run("ip link set dev bridge type bridge vlan_stats_enabled 1")
    node.run("ip link set dev bridge up")
    node.run("/sbin/bridge vlan add vid 1000 dev bridge")
def config_vxlan(node, node_ip):
    """
    Create a VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000.
    """
    node.run("ip link add dev vx-1000 type vxlan id 1000 dstport 4789")
    node.run("ip link set dev vx-1000 type vxlan nolearning")
    node.run("ip link set dev vx-1000 type vxlan local %s" % node_ip)
    node.run("ip link set dev vx-1000 type vxlan ttl 64")
    node.run("ip link set dev vx-1000 mtu 9152")
    node.run("ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100")
    node.run("ip link set dev vx-1000 up")

    # bridge attach and per-vlan setup: map VLAN-1000 as untagged/pvid
    node.run("ip link set dev vx-1000 master bridge")
    node.run("/sbin/bridge link set dev vx-1000 neigh_suppress on")
    node.run("/sbin/bridge link set dev vx-1000 learning off")
    node.run("/sbin/bridge link set dev vx-1000 priority 8")
    node.run("/sbin/bridge vlan del vid 1 dev vx-1000")
    node.run("/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000")
def config_svi(node, svi_pip):
    """
    Create an SVI for VLAN 1000
    """
    node.run("ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q")
    node.run("ip addr add %s/24 dev vlan1000" % svi_pip)
    node.run("ip link set dev vlan1000 up")
    node.run("/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1")

    # macvlan device carrying the anycast gateway MAC/IP
    node.run("ip link add link vlan1000 name vlan1000-v0 type macvlan mode private")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0")
    node.run("ip link set dev vlan1000-v0 address 00:00:5e:00:01:01")
    node.run("ip link set dev vlan1000-v0 up")
    # metric 1024 is not working
    node.run("ip addr add 45.0.0.1/24 dev vlan1000-v0")
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Create the bond/vxlan-bridge on the TOR which acts as VTEP and EPN-PE
    """
    # create a device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

    # create a vlan aware bridge
    config_bridge(tor)

    # create vxlan device and add it to bridge
    config_vxlan(tor, tor_ip)

    # create hostbonds and add them to the bridge; rack-1 and rack-2 use
    # distinct LACP system MACs so each rack forms its own ES
    if "torm1" in tor_name:
        sys_mac = "44:38:39:ff:ff:01"
    else:
        sys_mac = "44:38:39:ff:ff:02"
    bond_member = tor_name + "-eth2"
    config_bond(tor, "hostbond1", [bond_member], sys_mac, "bridge")

    bond_member = tor_name + "-eth3"
    config_bond(tor, "hostbond2", [bond_member], sys_mac, "bridge")

    # create the SVI with the anycast gateway
    config_svi(tor, svi_pip)
def config_tors(tgen, tors):
    # Configure every listed TOR as a VTEP/EVPN-PE using its per-TOR
    # tunnel-endpoint and SVI addresses.
    for tor_name in tors:
        tor = tgen.gears[tor_name]
        config_tor(tor_name, tor, tor_ips.get(tor_name), svi_ips.get(tor_name))
def compute_host_ip_mac(host_name):
    # Derive the host's IP (with prefix length) and MAC from the numeric
    # suffix of its name, e.g. "hostd11" -> ("45.0.0.11/24", "00:00:00:00:00:11").
    host_id = host_name.split("hostd")[1]
    return "45.0.0.%s/24" % host_id, "00:00:00:00:00:%s" % host_id
def config_host(host_name, host):
    """
    Create the dual-attached bond on host nodes for MH
    """
    # one leg to each rack TOR; all-zero actor system MAC on the host side
    bond_members = [host_name + "-eth0", host_name + "-eth1"]
    bond_name = "torbond"
    config_bond(host, bond_name, bond_members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
def config_hosts(tgen, hosts):
    # Build the dual-attached "torbond" on every listed host.
    for host_name in hosts:
        host = tgen.gears[host_name]
        config_host(host_name, host)
def setup_module(module):
    "Setup topology"
    tgen = Topogen(build_topo, module.__name__)
    tgen.start_topology()

    # EVPN-MH relies on kernel features (protodown, ES bonds) first
    # available in 4.19.
    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    tors = ["torm11", "torm12", "torm21", "torm22"]
    config_tors(tgen, tors)

    hosts = ["hostd11", "hostd12", "hostd21", "hostd22"]
    config_hosts(tgen, hosts)

    # This is a sample of configuration loading.
    router_list = tgen.routers()
    for rname, router in router_list.items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_PIM, os.path.join(CWD, "{}/pim.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/evpn.conf".format(rname))
        )
    tgen.start_router()
def teardown_module(_mod):
    "Teardown the pytest environment"
    tgen = get_topogen()

    # This function tears down the whole topology.
    tgen.stop_topology()
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check if ES peers are setup correctly on local ESs
    """
    # expected peers = the other TOR(s) in the DUT's own rack
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_1
    else:
        tor_ips_rack = tor_ips_rack_2

    peer_ips = [
        tor_ip for tor_name, tor_ip in tor_ips_rack.items() if dut_name not in tor_name
    ]

    # remove down VTEPs from the peer check list
    peer_set = set(peer_ips) - set(down_vteps)

    diff = peer_set.symmetric_difference(set(vtep_ips))

    return (esi, diff) if diff else None
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify list of PEs associated with a remote ES
    """
    # expected PEs = both TORs in the *other* rack
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_2
    else:
        tor_ips_rack = tor_ips_rack_1

    remote_ips = [tor_ip for tor_name, tor_ip in tor_ips_rack.items()]

    # remove down VTEPs from the remote check list
    remote_set = set(remote_ips) - set(down_vteps)

    diff = remote_set.symmetric_difference(set(vtep_ips))

    return (esi, diff) if diff else None
def check_es(dut):
    """
    Verify list of PEs associated all ESs, local and remote
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es json")
    bgp_es_json = json.loads(bgp_es)

    result = None

    expected_es_set = set([v for k, v in host_es_map.items()])
    curr_es_set = []

    # check is ES content is correct
    for es in bgp_es_json:
        esi = es["esi"]
        curr_es_set.append(esi)
        types = es["type"]
        vtep_ips = []
        for vtep in es.get("vteps", []):
            vtep_ips.append(vtep["vtep_ip"])

        if "local" in types:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # check if all ESs are present
    curr_es_set = set(curr_es_set)
    result = curr_es_set.symmetric_difference(expected_es_set)

    return result if result else None
def check_one_es(dut, esi, down_vteps):
    """
    Verify list of PEs associated with one ES, local or remote
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi)
    es = json.loads(bgp_es)

    if not es:
        return "esi %s not found" % esi

    types = es["type"]
    vtep_ips = []
    for vtep in es.get("vteps", []):
        vtep_ips.append(vtep["vtep_ip"])

    if "local" in types:
        result = check_local_es(esi, vtep_ips, dut.name, down_vteps)
    else:
        result = check_remote_es(esi, vtep_ips, dut.name, down_vteps)

    return result
def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    dut = tgen.gears[dut_name]
    test_fn = partial(check_es, dut)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)

    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg
def test_evpn_ead_update():
    """
    Flap a host link one the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # dut on rack1 and host link flap on rack2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]

    host_name = "hostd21"
    host = tgen.gears[host_name]
    esi = host_es_map.get(host_name)

    # check if the VTEP list is right to start with
    down_vteps = []
    test_fn = partial(check_one_es, dut, esi, down_vteps)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    down_vteps.append(tor_ips.get(remote_tor_name))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link down'.format(dut_name)
    assert result is None, assertmsg

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(tor_ips.get(remote_tor_name))
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link flap'.format(dut_name)
    assert result is None, assertmsg
def ping_anycast_gw(tgen):
    # ping the anycast gw from the local and remote hosts to populate
    # the mac address on the PEs
    python3_path = tgen.net.get_exec_path(["python3", "python"])
    script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
    # gratuitous-ARP toward the anycast gateway on the host bond
    # NOTE(review): intf/ipaddr values reconstructed from the host bond
    # ("torbond") and SVI anycast IP configured above — confirm against caller
    intf = "torbond"
    ipaddr = "45.0.0.1"
    ping_cmd = [
        python3_path,
        script_path,
        "--imports=Ether,ARP",
        "--interface=" + intf,
        'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr),
    ]
    for name in ("hostd11", "hostd21"):
        host = tgen.net.hosts[name]
        _, stdout, _ = host.cmd_status(ping_cmd, warn=False, stderr=subprocess.STDOUT)
        stdout = stdout.strip()
        if stdout:
            host.logger.debug(
                "%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout
            )
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    checks if mac is present and if desination matches the one provided

    Returns None when the MAC entry in "show evpn mac vni ... json" matches
    the expected type/ESI/interface, otherwise an error string.
    ping_gw=True first pings the anycast gateway (needs tgen) so the MAC
    gets learned/populated on the PEs.
    """

    if ping_gw:
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    # iterate with a distinct name so the `mac` parameter is not shadowed
    for mac_key, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        # the access interface is only meaningful for locally attached MACs
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        # BUG FIX: original compared `intf == intf` (always true), so a MAC
        # with the right ESI/type but wrong interface passed the check
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """

    tgen = get_topogen()

    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]
    tors = [tgen.gears["torm11"], tgen.gears["torm12"]]

    vni = 1000

    # check if the rack-1 host MAC is present on all rack-1 PEs
    # and points to local access port
    m_type = "local"
    _, mac = compute_host_ip_mac(local_host.name)
    esi = host_es_map.get(local_host.name)
    intf = "hostbond1"

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf, True, tgen)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" local MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg

    # check if the rack-2 host MAC is present on all rack-1 PEs
    # and points to the remote ES destination
    m_type = "remote"
    _, mac = compute_host_ip_mac(remote_host.name)
    esi = host_es_map.get(remote_host.name)
    intf = ""

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" remote MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg
def check_df_role(dut, esi, role):
    """
    Return error string if the df role on the dut is different
    """
    es_json = dut.vtysh_cmd("show evpn es %s json" % esi)
    es = json.loads(es_json)

    if not es:
        return "esi %s not found" % esi

    # the ES is DF unless zebra flags it as nonDF
    flags = es.get("flags", [])
    curr_role = "nonDF" if "nonDF" in flags else "DF"

    if curr_role != role:
        return "%s is %s for %s" % (dut.name, curr_role, esi)

    return None
def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the DF winner.
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    intf = "hostbond1"

    tors = [tgen.gears["torm11"], tgen.gears["torm12"]]
    df_node = "torm11"

    # check roles on rack-1
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg

    # change df preference on the nonDF to make it the df
    torm12 = tgen.gears["torm12"]
    torm12.vtysh_cmd("conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000))
    df_node = "torm12"

    # re-check roles on rack-1; we should have a new winner
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg
def check_protodown_rc(dut, protodown_rc):
    """
    check if specified protodown reason code is set

    protodown_rc=None asserts that NO reason code is present.
    Returns None on success, an error string otherwise.
    """
    out = dut.vtysh_cmd("show evpn json")

    evpn_js = json.loads(out)
    tmp_rc = evpn_js.get("protodownReasons", [])

    if protodown_rc:
        if protodown_rc not in tmp_rc:
            return "protodown %s missing in %s" % (protodown_rc, tmp_rc)
    else:
        if tmp_rc:
            return "unexpected protodown rc %s" % (tmp_rc)

    return None
def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """

    tgen = get_topogen()

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    # wait for protodown rc to clear after startup
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # disable the uplinks
    dut.run("ip link set %s-eth0 down" % dut_name)
    dut.run("ip link set %s-eth1 down" % dut_name)

    # check if the access ports have been protodowned
    test_fn = partial(check_protodown_rc, dut, "uplinkDown")
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # re-enable the uplinks
    dut.run("ip link set %s-eth0 up" % dut_name)
    dut.run("ip link set %s-eth1 up" % dut_name)

    # check if the access ports have been moved out of protodown
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg
if __name__ == "__main__":
    # run this file directly with pytest, keeping stdout unbuffered (-s)
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))