#!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# Copyright (c) 2020 by
# Cumulus Networks, Inc.

"""
test_evpn_mh.py: Testing EVPN multihoming
"""
# Standard-library imports. NOTE(review): the extraction dropped the plain
# "import X" lines; json/os/platform/subprocess/sys are all used below and
# are restored here. The original also imported "from functools import
# partial" and assigned pytestmark twice; once is enough.
import json
import os
import platform
import subprocess
import sys
from functools import partial

import pytest

# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))

# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest

# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen

# every test in this file needs bgpd and pimd running
pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
42 #####################################################
44 ## Network Topology Definition
46 ## See topology picture at evpn-mh-topo-tests.pdf
47 #####################################################
def build_topo(tgen):
    """
    EVPN Multihoming Topology -
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2
    """
    # NOTE(review): the def line and item 1 of the docstring were lost in
    # extraction; the function name is grounded by Topogen(build_topo, ...)
    # in setup_module.

    tgen.add_router("spine1")
    tgen.add_router("spine2")
    tgen.add_router("torm11")
    tgen.add_router("torm12")
    tgen.add_router("torm21")
    tgen.add_router("torm22")
    tgen.add_router("hostd11")
    tgen.add_router("hostd12")
    tgen.add_router("hostd21")
    tgen.add_router("hostd22")

    # First switch is for a dummy interface (for local network)

    ##################### spine1 ########################
    # spine1-eth0 is connected to torm11-eth0
    switch = tgen.add_switch("sw1")
    switch.add_link(tgen.gears["spine1"])
    switch.add_link(tgen.gears["torm11"])

    # spine1-eth1 is connected to torm12-eth0
    switch = tgen.add_switch("sw2")
    switch.add_link(tgen.gears["spine1"])
    switch.add_link(tgen.gears["torm12"])

    # spine1-eth2 is connected to torm21-eth0
    switch = tgen.add_switch("sw3")
    switch.add_link(tgen.gears["spine1"])
    switch.add_link(tgen.gears["torm21"])

    # spine1-eth3 is connected to torm22-eth0
    switch = tgen.add_switch("sw4")
    switch.add_link(tgen.gears["spine1"])
    switch.add_link(tgen.gears["torm22"])

    ##################### spine2 ########################
    # spine2-eth0 is connected to torm11-eth1
    switch = tgen.add_switch("sw5")
    switch.add_link(tgen.gears["spine2"])
    switch.add_link(tgen.gears["torm11"])

    # spine2-eth1 is connected to torm12-eth1
    switch = tgen.add_switch("sw6")
    switch.add_link(tgen.gears["spine2"])
    switch.add_link(tgen.gears["torm12"])

    # spine2-eth2 is connected to torm21-eth1
    switch = tgen.add_switch("sw7")
    switch.add_link(tgen.gears["spine2"])
    switch.add_link(tgen.gears["torm21"])

    # spine2-eth3 is connected to torm22-eth1
    switch = tgen.add_switch("sw8")
    switch.add_link(tgen.gears["spine2"])
    switch.add_link(tgen.gears["torm22"])

    ##################### torm11 ########################
    # torm11-eth2 is connected to hostd11-eth0
    switch = tgen.add_switch("sw9")
    switch.add_link(tgen.gears["torm11"])
    switch.add_link(tgen.gears["hostd11"])

    # torm11-eth3 is connected to hostd12-eth0
    switch = tgen.add_switch("sw10")
    switch.add_link(tgen.gears["torm11"])
    switch.add_link(tgen.gears["hostd12"])

    ##################### torm12 ########################
    # torm12-eth2 is connected to hostd11-eth1
    switch = tgen.add_switch("sw11")
    switch.add_link(tgen.gears["torm12"])
    switch.add_link(tgen.gears["hostd11"])

    # torm12-eth3 is connected to hostd12-eth1
    switch = tgen.add_switch("sw12")
    switch.add_link(tgen.gears["torm12"])
    switch.add_link(tgen.gears["hostd12"])

    ##################### torm21 ########################
    # torm21-eth2 is connected to hostd21-eth0
    switch = tgen.add_switch("sw13")
    switch.add_link(tgen.gears["torm21"])
    switch.add_link(tgen.gears["hostd21"])

    # torm21-eth3 is connected to hostd22-eth0
    switch = tgen.add_switch("sw14")
    switch.add_link(tgen.gears["torm21"])
    switch.add_link(tgen.gears["hostd22"])

    ##################### torm22 ########################
    # torm22-eth2 is connected to hostd21-eth1
    switch = tgen.add_switch("sw15")
    switch.add_link(tgen.gears["torm22"])
    switch.add_link(tgen.gears["hostd21"])

    # torm22-eth3 is connected to hostd22-eth1
    switch = tgen.add_switch("sw16")
    switch.add_link(tgen.gears["torm22"])
    switch.add_link(tgen.gears["hostd22"])
#####################################################
## Global topology constants
#####################################################

# VTEP IP of each TOR; config_vxlan() uses it as the VxLAN tunnel source.
# NOTE(review): the dict names/braces were dropped in extraction; the names
# are grounded by config_tors() which calls tor_ips.get()/svi_ips.get().
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# Per-TOR unique SVI IP (the shared anycast GW 45.0.0.1 lives on the
# vlan1000-v0 macvlan created in config_svi()).
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"}

tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"}

# ESI associated with each dual-attached host's bond.
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Used to setup bonds on the TORs and hosts for MH
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run(
        "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac)
    )

    # members must be down before they can be enslaved
    for bond_member in bond_members:
        node.run("ip link set dev %s down" % bond_member)
        node.run("ip link set dev %s master %s" % (bond_member, bond_name))
        node.run("ip link set dev %s up" % bond_member)

    node.run("ip link set dev %s up" % bond_name)

    # if bridge is specified add the bond as a bridge member
    if br:
        node.run(" ip link set dev %s master bridge" % bond_name)
        node.run("/sbin/bridge link set dev %s priority 8" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name)
def config_mcast_tunnel_termination_device(node):
    """
    The kernel requires a device to terminate VxLAN multicast tunnels
    when EVPN-PIM is used for flooded traffic
    """
    node.run("ip link add dev ipmr-lo type dummy")
    node.run("ip link set dev ipmr-lo mtu 16000")
    node.run("ip link set dev ipmr-lo mode dormant")
    node.run("ip link set dev ipmr-lo up")
def config_bridge(node):
    """
    Create a VLAN aware bridge
    """
    node.run("ip link add dev bridge type bridge stp_state 0")
    node.run("ip link set dev bridge type bridge vlan_filtering 1")
    node.run("ip link set dev bridge mtu 9216")
    node.run("ip link set dev bridge type bridge ageing_time 1800")
    node.run("ip link set dev bridge type bridge mcast_snooping 0")
    node.run("ip link set dev bridge type bridge vlan_stats_enabled 1")
    node.run("ip link set dev bridge up")
    node.run("/sbin/bridge vlan add vid 1000 dev bridge")
def config_vxlan(node, node_ip):
    """
    Create a VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000.
    """
    node.run("ip link add dev vx-1000 type vxlan id 1000 dstport 4789")
    node.run("ip link set dev vx-1000 type vxlan nolearning")
    node.run("ip link set dev vx-1000 type vxlan local %s" % node_ip)
    node.run("ip link set dev vx-1000 type vxlan ttl 64")
    node.run("ip link set dev vx-1000 mtu 9152")
    node.run("ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100")
    node.run("ip link set dev vx-1000 up")

    # enslave the VxLAN device to the bridge and set up its per-port attrs
    node.run("ip link set dev vx-1000 master bridge")
    node.run("/sbin/bridge link set dev vx-1000 neigh_suppress on")
    node.run("/sbin/bridge link set dev vx-1000 learning off")
    node.run("/sbin/bridge link set dev vx-1000 priority 8")
    node.run("/sbin/bridge vlan del vid 1 dev vx-1000")
    node.run("/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000")
def config_svi(node, svi_pip):
    """
    Create an SVI for VLAN 1000
    """
    node.run("ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q")
    node.run("ip addr add %s/24 dev vlan1000" % svi_pip)
    node.run("ip link set dev vlan1000 up")
    node.run("/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1")
    # vlan1000-v0 carries the anycast GW MAC/IP shared by all TORs
    node.run("ip link add link vlan1000 name vlan1000-v0 type macvlan mode private")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0")
    node.run("ip link set dev vlan1000-v0 address 00:00:5e:00:01:01")
    node.run("ip link set dev vlan1000-v0 up")
    # metric 1024 is not working
    node.run("ip addr add 45.0.0.1/24 dev vlan1000-v0")
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Create the bond/vxlan-bridge on the TOR which acts as VTEP and EPN-PE
    """
    # create a device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

    # create a vlan aware bridge
    # NOTE(review): this call line was dropped in extraction; reconstructed
    # from the comment and the config_bridge() definition above.
    config_bridge(tor)

    # create vxlan device and add it to bridge
    config_vxlan(tor, tor_ip)

    # create hostbonds and add them to the bridge; rack-1 and rack-2 TOR
    # pairs each share one LACP system MAC
    if "torm1" in tor_name:
        sys_mac = "44:38:39:ff:ff:01"
    else:
        sys_mac = "44:38:39:ff:ff:02"
    bond_member = tor_name + "-eth2"
    config_bond(tor, "hostbond1", [bond_member], sys_mac, "bridge")

    bond_member = tor_name + "-eth3"
    config_bond(tor, "hostbond2", [bond_member], sys_mac, "bridge")

    # create the SVI for VLAN 1000
    config_svi(tor, svi_pip)
def config_tors(tgen, tors):
    """Apply the full TOR config (bridge/vxlan/bonds/SVI) to each named TOR."""
    for tor_name in tors:
        tor = tgen.gears[tor_name]
        config_tor(tor_name, tor, tor_ips.get(tor_name), svi_ips.get(tor_name))
def compute_host_ip_mac(host_name):
    """Derive a host's IP/MAC from its name: "hostdNN" -> ("45.0.0.NN/24",
    "00:00:00:00:00:NN")."""
    host_id = host_name.split("hostd")[1]
    host_ip = "45.0.0." + host_id + "/24"
    host_mac = "00:00:00:00:00:" + host_id

    return host_ip, host_mac
def config_host(host_name, host):
    """
    Create the dual-attached bond on host nodes for MH
    """
    bond_members = []
    bond_members.append(host_name + "-eth0")
    bond_members.append(host_name + "-eth1")
    bond_name = "torbond"
    # all-zero actor system MAC: let the kernel pick; no bridge on hosts
    config_bond(host, bond_name, bond_members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
def config_hosts(tgen, hosts):
    """Set up the dual-attached bond on each named host."""
    for host_name in hosts:
        host = tgen.gears[host_name]
        config_host(host_name, host)
def setup_module(module):
    "Build the topology, configure TORs/hosts and start the FRR routers."
    tgen = Topogen(build_topo, module.__name__)
    tgen.start_topology()

    # EVPN-MH needs kernel support (bond/bridge/vxlan features)
    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    tors = []
    tors.append("torm11")
    tors.append("torm12")
    tors.append("torm21")
    tors.append("torm22")
    config_tors(tgen, tors)

    hosts = []
    hosts.append("hostd11")
    hosts.append("hostd12")
    hosts.append("hostd21")
    hosts.append("hostd22")
    config_hosts(tgen, hosts)

    # This is a sample of configuration loading.
    router_list = tgen.routers()
    for rname, router in router_list.items():
        # NOTE(review): the load_config wrappers and the final start call
        # were dropped in extraction and are reconstructed here — confirm
        # against the upstream file.
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_PIM, os.path.join(CWD, "{}/pim.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/evpn.conf".format(rname))
        )
    tgen.start_router()
def teardown_module(_mod):
    "Teardown the pytest environment"
    tgen = get_topogen()

    # This function tears down the whole topology.
    # NOTE(review): body was dropped in extraction; reconstructed from the
    # comment and the standard topotest teardown pattern — confirm upstream.
    tgen.stop_topology()
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check if ES peers are setup correctly on local ESs

    Returns (esi, diff) when the advertised VTEP set differs from the
    expected rack peers, None when they match.
    """
    peer_ips = []
    # the DUT's ES peers are the other TORs in the same rack
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_1
    else:
        tor_ips_rack = tor_ips_rack_2

    for tor_name, tor_ip in tor_ips_rack.items():
        if dut_name not in tor_name:
            peer_ips.append(tor_ip)

    # remove down VTEPs from the peer check list
    peer_set = set(peer_ips)
    down_vtep_set = set(down_vteps)
    peer_set = peer_set - down_vtep_set

    vtep_set = set(vtep_ips)
    diff = peer_set.symmetric_difference(vtep_set)

    return (esi, diff) if diff else None
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify list of PEs associated with a remote ES

    Returns (esi, diff) when the advertised VTEP set differs from the
    expected other-rack TORs, None when they match.
    """
    remote_ips = []
    # the remote ES lives on the other rack's TOR pair
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_2
    else:
        tor_ips_rack = tor_ips_rack_1

    for tor_name, tor_ip in tor_ips_rack.items():
        remote_ips.append(tor_ip)

    # remove down VTEPs from the remote check list
    remote_set = set(remote_ips)
    down_vtep_set = set(down_vteps)
    remote_set = remote_set - down_vtep_set

    vtep_set = set(vtep_ips)
    diff = remote_set.symmetric_difference(vtep_set)

    return (esi, diff) if diff else None
def check_es(dut):
    """
    Verify list of PEs associated all ESs, local and remote
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es json")
    bgp_es_json = json.loads(bgp_es)

    expected_es_set = set([v for k, v in host_es_map.items()])
    curr_es_set = []

    # check is ES content is correct
    for es in bgp_es_json:
        # NOTE(review): the per-ES field extraction and the local/remote
        # branch lines were dropped; reconstructed from the visible
        # check_local_es/check_remote_es calls — confirm upstream.
        esi = es["esi"]
        curr_es_set.append(esi)
        es_type = es["type"]

        vtep_ips = []
        for vtep in es.get("vteps", []):
            vtep_ips.append(vtep["vtep_ip"])

        if "local" in es_type:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # check if all ESs are present
    curr_es_set = set(curr_es_set)
    result = curr_es_set.symmetric_difference(expected_es_set)

    return result if result else None
def check_one_es(dut, esi, down_vteps):
    """
    Verify the VTEP list of a single ES, local or remote
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi)
    es = json.loads(bgp_es)

    if not es:
        return "esi %s not found" % esi

    # NOTE(review): the type extraction and branch lines were dropped in
    # extraction; reconstructed from the visible check_local_es/
    # check_remote_es calls — confirm upstream.
    es_type = es["type"]
    vtep_ips = []
    for vtep in es.get("vteps", []):
        vtep_ips.append(vtep["vtep_ip"])

    if "local" in es_type:
        result = check_local_es(esi, vtep_ips, dut.name, down_vteps)
    else:
        result = check_remote_es(esi, vtep_ips, dut.name, down_vteps)

    return result
def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # NOTE(review): the dut_name assignment was dropped in extraction;
    # torm11 is the DUT used by the sibling tests — confirm upstream.
    dut_name = "torm11"
    dut = tgen.gears[dut_name]
    test_fn = partial(check_es, dut)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)

    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg
def test_evpn_ead_update():
    """
    Flap a host link one the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # dut on rack1 and host link flap on rack2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]

    host_name = "hostd21"
    host = tgen.gears[host_name]
    esi = host_es_map.get(host_name)

    # check if the VTEP list is right to start with
    down_vteps = []
    test_fn = partial(check_one_es, dut, esi, down_vteps)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    down_vteps.append(tor_ips.get(remote_tor_name))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link down'.format(dut_name)
    assert result is None, assertmsg

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(tor_ips.get(remote_tor_name))
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link flap'.format(dut_name)
    assert result is None, assertmsg
def ping_anycast_gw(tgen):
    # ping the anycast gw from the local and remote hosts to populate
    # the mac address on the PEs
    python3_path = tgen.net.get_exec_path(["python3", "python"])
    script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
    # NOTE(review): intf/ipaddr and the command-list head were dropped in
    # extraction; "torbond" and 45.0.0.1 are grounded by config_host/
    # config_svi — confirm upstream.
    intf = "torbond"
    ipaddr = "45.0.0.1"
    ping_cmd = [
        python3_path,
        script_path,
        "--imports=Ether,ARP",
        "--interface=" + intf,
        'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr),
    ]
    for name in ("hostd11", "hostd21"):
        host = tgen.net.hosts[name]
        _, stdout, _ = host.cmd_status(ping_cmd, warn=False, stderr=subprocess.STDOUT)
        stdout = stdout.strip()
        if stdout:
            host.logger.debug(
                "%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout
            )
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    checks if mac is present and if destination matches the one provided

    Returns None on a match, an error string otherwise.
    """
    if ping_gw:
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    for mac, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        # BUG FIX: the original compared "intf == intf" (always True), so the
        # interface was never actually checked and tmp_intf was unused;
        # compare the interface reported by the DUT instead.
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """
    tgen = get_topogen()

    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]
    tors = []
    tors.append(tgen.gears["torm11"])
    tors.append(tgen.gears["torm12"])

    # NOTE(review): vni/m_type/intf assignments were dropped in extraction;
    # VNI 1000 and hostbond1 (hostd11's access bond) are grounded by the
    # config functions above — confirm upstream.
    vni = 1000

    # check if the rack-1 host MAC is present on all rack-1 PEs
    # and points to local access port
    m_type = "local"
    _, mac = compute_host_ip_mac(local_host.name)
    esi = host_es_map.get(local_host.name)
    intf = "hostbond1"

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf, True, tgen)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" local MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg

    # check if the rack-2 host MAC is present on all rack-1 PEs
    # and points to the remote ES destination
    m_type = "remote"
    _, mac = compute_host_ip_mac(remote_host.name)
    esi = host_es_map.get(remote_host.name)
    intf = ""  # check_mac reports an empty intf for non-local MACs

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" remote MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg
def check_df_role(dut, esi, role):
    """
    Return error string if the df role on the dut is different
    """
    es_json = dut.vtysh_cmd("show evpn es %s json" % esi)
    es = json.loads(es_json)

    if not es:
        return "esi %s not found" % esi

    # zebra reports "nonDF" in the flags list for the non-designated PE
    flags = es.get("flags", [])
    curr_role = "nonDF" if "nonDF" in flags else "DF"

    if curr_role != role:
        return "%s is %s for %s" % (dut.name, curr_role, esi)

    return None
def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the DF winner.
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    # NOTE(review): intf and df_node assignments were dropped in
    # extraction; hostbond1 carries hostd11 and torm11 is the initial DF
    # winner — confirm upstream.
    intf = "hostbond1"

    tors = []
    tors.append(tgen.gears["torm11"])
    tors.append(tgen.gears["torm12"])
    df_node = "torm11"

    # check roles on rack-1
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg

    # change df preference on the nonDF to make it the df
    torm12 = tgen.gears["torm12"]
    torm12.vtysh_cmd("conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000))
    df_node = "torm12"

    # re-check roles on rack-1; we should have a new winner
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg
def check_protodown_rc(dut, protodown_rc):
    """
    check if specified protodown reason code is set

    With protodown_rc=None, verify that NO reason code is set.
    Returns None on success, an error string otherwise.
    """
    out = dut.vtysh_cmd("show evpn json")

    evpn_js = json.loads(out)
    tmp_rc = evpn_js.get("protodownReasons", [])

    # NOTE(review): the branch lines were dropped in extraction; the
    # structure is reconstructed from the two visible returns.
    if protodown_rc:
        if protodown_rc not in tmp_rc:
            return "protodown %s missing in %s" % (protodown_rc, tmp_rc)
    else:
        if tmp_rc:
            return "unexpected protodown rc %s" % (tmp_rc)

    return None
def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """
    tgen = get_topogen()

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    # wait for protodown rc to clear after startup
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # disable the uplinks
    dut.run("ip link set %s-eth0 down" % dut_name)
    dut.run("ip link set %s-eth1 down" % dut_name)

    # check if the access ports have been protodowned
    test_fn = partial(check_protodown_rc, dut, "uplinkDown")
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # re-enable the uplinks
    dut.run("ip link set %s-eth0 up" % dut_name)
    dut.run("ip link set %s-eth1 up" % dut_name)

    # check if the access ports have been moved out of protodown
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg
if __name__ == "__main__":
    # run this file directly: pass through CLI args, keep output unbuffered
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))