# Copyright (c) 2020 by
# Cumulus Networks, Inc.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.

"""
test_evpn_mh.py: Testing EVPN multihoming
"""

import os
import sys
import json
import platform
from functools import partial

import pytest

pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]

# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))

# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger

# Required to instantiate the topology builder class.
from mininet.topo import Topo

#####################################################
##
## Network Topology Definition
##
## See topology picture at evpn-mh-topo-tests.pdf
#####################################################


class NetworkTopo(Topo):
    """
    EVPN Multihoming Topology -
    1. Two level CLOS
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2
    """

    def build(self, **_opts):
        tgen = get_topogen(self)

        tgen.add_router("spine1")
        tgen.add_router("spine2")
        tgen.add_router("torm11")
        tgen.add_router("torm12")
        tgen.add_router("torm21")
        tgen.add_router("torm22")
        tgen.add_router("hostd11")
        tgen.add_router("hostd12")
        tgen.add_router("hostd21")
        tgen.add_router("hostd22")

        # First switch is for a dummy interface (for local network)

        ##################### spine1 ########################
        # spine1-eth0 is connected to torm11-eth0
        switch = tgen.add_switch("sw1")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm11"])

        # spine1-eth1 is connected to torm12-eth0
        switch = tgen.add_switch("sw2")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm12"])

        # spine1-eth2 is connected to torm21-eth0
        switch = tgen.add_switch("sw3")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm21"])

        # spine1-eth3 is connected to torm22-eth0
        switch = tgen.add_switch("sw4")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm22"])

        ##################### spine2 ########################
        # spine2-eth0 is connected to torm11-eth1
        switch = tgen.add_switch("sw5")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm11"])

        # spine2-eth1 is connected to torm12-eth1
        switch = tgen.add_switch("sw6")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm12"])

        # spine2-eth2 is connected to torm21-eth1
        switch = tgen.add_switch("sw7")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm21"])

        # spine2-eth3 is connected to torm22-eth1
        switch = tgen.add_switch("sw8")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm22"])

        ##################### torm11 ########################
        # torm11-eth2 is connected to hostd11-eth0
        switch = tgen.add_switch("sw9")
        switch.add_link(tgen.gears["torm11"])
        switch.add_link(tgen.gears["hostd11"])

        # torm11-eth3 is connected to hostd12-eth0
        switch = tgen.add_switch("sw10")
        switch.add_link(tgen.gears["torm11"])
        switch.add_link(tgen.gears["hostd12"])

        ##################### torm12 ########################
        # torm12-eth2 is connected to hostd11-eth1
        switch = tgen.add_switch("sw11")
        switch.add_link(tgen.gears["torm12"])
        switch.add_link(tgen.gears["hostd11"])

        # torm12-eth3 is connected to hostd12-eth1
        switch = tgen.add_switch("sw12")
        switch.add_link(tgen.gears["torm12"])
        switch.add_link(tgen.gears["hostd12"])

        ##################### torm21 ########################
        # torm21-eth2 is connected to hostd21-eth0
        switch = tgen.add_switch("sw13")
        switch.add_link(tgen.gears["torm21"])
        switch.add_link(tgen.gears["hostd21"])

        # torm21-eth3 is connected to hostd22-eth0
        switch = tgen.add_switch("sw14")
        switch.add_link(tgen.gears["torm21"])
        switch.add_link(tgen.gears["hostd22"])

        ##################### torm22 ########################
        # torm22-eth2 is connected to hostd21-eth1
        switch = tgen.add_switch("sw15")
        switch.add_link(tgen.gears["torm22"])
        switch.add_link(tgen.gears["hostd21"])

        # torm22-eth3 is connected to hostd22-eth1
        switch = tgen.add_switch("sw16")
        switch.add_link(tgen.gears["torm22"])
        switch.add_link(tgen.gears["hostd22"])


#####################################################
#####################################################

tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"}

tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"}

host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}


def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Used to setup bonds on the TORs and hosts for MH
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run(
        "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac)
    )

    for bond_member in bond_members:
        node.run("ip link set dev %s down" % bond_member)
        node.run("ip link set dev %s master %s" % (bond_member, bond_name))
        node.run("ip link set dev %s up" % bond_member)

    node.run("ip link set dev %s up" % bond_name)

    # if bridge is specified add the bond as a bridge member
    if br:
        node.run(" ip link set dev %s master bridge" % bond_name)
        node.run("/sbin/bridge link set dev %s priority 8" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name)
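
# Note on config_bond: the two TORs attached to a given host are configured
# with the same ad_actor_system MAC (see config_tor), so the host's 802.3ad
# bond sees a single LACP partner across both of its uplinks.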


def config_mcast_tunnel_termination_device(node):
    """
    The kernel requires a device to terminate VxLAN multicast tunnels
    when EVPN-PIM is used for flooded traffic
    """
    node.run("ip link add dev ipmr-lo type dummy")
    node.run("ip link set dev ipmr-lo mtu 16000")
    node.run("ip link set dev ipmr-lo mode dormant")
    node.run("ip link set dev ipmr-lo up")


def config_bridge(node):
    """
    Create a VLAN aware bridge
    """
    node.run("ip link add dev bridge type bridge stp_state 0")
    node.run("ip link set dev bridge type bridge vlan_filtering 1")
    node.run("ip link set dev bridge mtu 9216")
    node.run("ip link set dev bridge type bridge ageing_time 1800")
    node.run("ip link set dev bridge type bridge mcast_snooping 0")
    node.run("ip link set dev bridge type bridge vlan_stats_enabled 1")
    node.run("ip link set dev bridge up")
    node.run("/sbin/bridge vlan add vid 1000 dev bridge")


def config_vxlan(node, node_ip):
    """
    Create a VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000.
    """
    node.run("ip link add dev vx-1000 type vxlan id 1000 dstport 4789")
    node.run("ip link set dev vx-1000 type vxlan nolearning")
    node.run("ip link set dev vx-1000 type vxlan local %s" % node_ip)
    node.run("ip link set dev vx-1000 type vxlan ttl 64")
    node.run("ip link set dev vx-1000 mtu 9152")
    node.run("ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100")
    node.run("ip link set dev vx-1000 up")

    node.run("ip link set dev vx-1000 master bridge")
    node.run("/sbin/bridge link set dev vx-1000 neigh_suppress on")
    node.run("/sbin/bridge link set dev vx-1000 learning off")
    node.run("/sbin/bridge link set dev vx-1000 priority 8")
    node.run("/sbin/bridge vlan del vid 1 dev vx-1000")
    node.run("/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000")


def config_svi(node, svi_pip):
    """
    Create an SVI for VLAN 1000
    """
    node.run("ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q")
    node.run("ip addr add %s/24 dev vlan1000" % svi_pip)
    node.run("ip link set dev vlan1000 up")
    node.run("/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1")
    node.run("ip link add link vlan1000 name vlan1000-v0 type macvlan mode private")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0")
    node.run("ip link set dev vlan1000-v0 address 00:00:5e:00:01:01")
    node.run("ip link set dev vlan1000-v0 up")
    # metric 1024 is not working
    node.run("ip addr add 45.0.0.1/24 dev vlan1000-v0")


def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Create the bond/vxlan-bridge on the TOR which acts as VTEP and EVPN-PE
    """
    # create a device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

    # create a vlan aware bridge
    config_bridge(tor)

    # create vxlan device and add it to bridge
    config_vxlan(tor, tor_ip)

    # create hostbonds and add them to the bridge
    if "torm1" in tor_name:
        sys_mac = "44:38:39:ff:ff:01"
    else:
        sys_mac = "44:38:39:ff:ff:02"
    bond_member = tor_name + "-eth2"
    config_bond(tor, "hostbond1", [bond_member], sys_mac, "bridge")

    bond_member = tor_name + "-eth3"
    config_bond(tor, "hostbond2", [bond_member], sys_mac, "bridge")

    # create the SVI
    config_svi(tor, svi_pip)


def config_tors(tgen, tors):
    for tor_name in tors:
        tor = tgen.gears[tor_name]
        config_tor(tor_name, tor, tor_ips.get(tor_name), svi_ips.get(tor_name))


def compute_host_ip_mac(host_name):
    host_id = host_name.split("hostd")[1]
    host_ip = "45.0.0." + host_id + "/24"
    host_mac = "00:00:00:00:00:" + host_id

    return host_ip, host_mac


def config_host(host_name, host):
    """
    Create the dual-attached bond on host nodes for MH
    """
    bond_members = []
    bond_members.append(host_name + "-eth0")
    bond_members.append(host_name + "-eth1")
    bond_name = "torbond"
    config_bond(host, bond_name, bond_members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))


def config_hosts(tgen, hosts):
    for host_name in hosts:
        host = tgen.gears[host_name]
        config_host(host_name, host)


def setup_module(module):
    "Setup the topology and start the routers"
    tgen = Topogen(NetworkTopo, module.__name__)
    tgen.start_topology()

    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    tors = []
    tors.append("torm11")
    tors.append("torm12")
    tors.append("torm21")
    tors.append("torm22")
    config_tors(tgen, tors)

    hosts = []
    hosts.append("hostd11")
    hosts.append("hostd12")
    hosts.append("hostd21")
    hosts.append("hostd22")
    config_hosts(tgen, hosts)

    # This is a sample of configuration loading.
    router_list = tgen.routers()
    for rname, router in router_list.items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_PIM, os.path.join(CWD, "{}/pim.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/evpn.conf".format(rname))
        )

    tgen.start_router()


def teardown_module(_mod):
    "Teardown the pytest environment"
    tgen = get_topogen()

    # This function tears down the whole topology.
    tgen.stop_topology()


def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check if ES peers are setup correctly on local ESs
    """
    peer_ips = []
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_1
    else:
        tor_ips_rack = tor_ips_rack_2

    for tor_name, tor_ip in tor_ips_rack.items():
        if dut_name not in tor_name:
            peer_ips.append(tor_ip)

    # remove down VTEPs from the peer check list
    peer_set = set(peer_ips)
    down_vtep_set = set(down_vteps)
    peer_set = peer_set - down_vtep_set

    vtep_set = set(vtep_ips)
    diff = peer_set.symmetric_difference(vtep_set)

    return (esi, diff) if diff else None
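
# Example: for a local ES on torm11 the expected peer list is just torm12's
# VTEP IP (192.168.100.16); check_local_es returns (esi, diff) only when the
# VTEP list reported for the ES differs from that expectation.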


def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify list of PEs associated with a remote ES
    """
    remote_ips = []

    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_2
    else:
        tor_ips_rack = tor_ips_rack_1

    for tor_name, tor_ip in tor_ips_rack.items():
        remote_ips.append(tor_ip)

    # remove down VTEPs from the remote check list
    remote_set = set(remote_ips)
    down_vtep_set = set(down_vteps)
    remote_set = remote_set - down_vtep_set

    vtep_set = set(vtep_ips)
    diff = remote_set.symmetric_difference(vtep_set)

    return (esi, diff) if diff else None


def check_es(dut):
    """
    Verify the list of PEs associated with all ESs, local and remote
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vpn evpn es json")
    bgp_es_json = json.loads(bgp_es)

    expected_es_set = set([v for k, v in host_es_map.items()])
    curr_es_set = []

    # check if ES content is correct
    for es in bgp_es_json:
        esi = es["esi"]
        curr_es_set.append(esi)
        types = es["type"]

        vtep_ips = []
        for vtep in es.get("vteps", []):
            vtep_ips.append(vtep["vtep_ip"])

        if "local" in types:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # check if all ESs are present
    curr_es_set = set(curr_es_set)
    result = curr_es_set.symmetric_difference(expected_es_set)

    return result if result else None
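
# check_es/check_one_es parse entries of roughly this shape from
# "show bgp l2vpn evpn es json" (illustrative sketch, not verbatim output):
#   {"esi": "03:44:38:39:ff:ff:01:00:00:01", "type": ["local"],
#    "vteps": [{"vtep_ip": "192.168.100.16"}]}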


def check_one_es(dut, esi, down_vteps):
    """
    Verify the list of PEs associated with one ES, local or remote
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vpn evpn es %s json" % esi)
    es = json.loads(bgp_es)

    if not es:
        return "esi %s not found" % esi

    types = es["type"]
    vtep_ips = []
    for vtep in es.get("vteps", []):
        vtep_ips.append(vtep["vtep_ip"])

    if "local" in types:
        result = check_local_es(esi, vtep_ips, dut.name, down_vteps)
    else:
        result = check_remote_es(esi, vtep_ips, dut.name, down_vteps)

    return result


def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    dut = tgen.gears[dut_name]
    test_fn = partial(check_es, dut)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)

    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg


def test_evpn_ead_update():
    """
    Flap a host link on the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # dut on rack1 and host link flap on rack2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]

    host_name = "hostd21"
    host = tgen.gears[host_name]
    esi = host_es_map.get(host_name)

    # check if the VTEP list is right to start with
    down_vteps = []
    test_fn = partial(check_one_es, dut, esi, down_vteps)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    down_vteps.append(tor_ips.get(remote_tor_name))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link down'.format(dut_name)
    assert result is None, assertmsg

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(tor_ips.get(remote_tor_name))
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link flap'.format(dut_name)
    assert result is None, assertmsg


def ping_anycast_gw(tgen):
    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]

    # ping the anycast gw from the local and remote hosts to populate
    # the mac address on the PEs
    cmd_str = "arping -I torbond -c 1 45.0.0.1"
    local_host.run(cmd_str)
    remote_host.run(cmd_str)
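
# arping to the anycast gateway makes each PE learn the host MAC on its local
# ES bond; the MAC is then synced to the peer TOR and advertised to the remote
# rack via BGP, which is what check_mac() below verifies.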


def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    checks if mac is present and if destination matches the one provided
    """
    if ping_gw:
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    for mac, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)


def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]
    tors = []
    tors.append(tgen.gears["torm11"])
    tors.append(tgen.gears["torm12"])

    vni = 1000

    # check if the rack-1 host MAC is present on all rack-1 PEs
    # and points to local access port
    m_type = "local"
    _, mac = compute_host_ip_mac(local_host.name)
    esi = host_es_map.get(local_host.name)
    intf = "hostbond1"

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf, True, tgen)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" local MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg

    # check if the rack-2 host MAC is present on all rack-1 PEs
    # and points to the remote ES destination
    m_type = "remote"
    _, mac = compute_host_ip_mac(remote_host.name)
    esi = host_es_map.get(remote_host.name)
    intf = ""

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" remote MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg


def check_df_role(dut, esi, role):
    """
    Return error string if the df role on the dut is different
    """
    es_json = dut.vtysh_cmd("show evpn es %s json" % esi)
    es = json.loads(es_json)

    if not es:
        return "esi %s not found" % esi

    flags = es.get("flags", [])
    curr_role = "nonDF" if "nonDF" in flags else "DF"

    if curr_role != role:
        return "%s is %s for %s" % (dut.name, curr_role, esi)

    return None
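
# For every ES one PE is elected designated forwarder (DF) for BUM traffic;
# "show evpn es" flags the losers as "nonDF". The test below bumps the DF
# preference on the current nonDF (evpn mh es-df-pref) to force a re-election.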


def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the DF winner.
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    intf = "hostbond1"

    tors = []
    tors.append(tgen.gears["torm11"])
    tors.append(tgen.gears["torm12"])

    df_node = "torm11"

    # check roles on rack-1
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg

    # change df preference on the nonDF to make it the df
    torm12 = tgen.gears["torm12"]
    torm12.vtysh_cmd("conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000))

    # re-check roles on rack-1; we should have a new winner
    df_node = "torm12"
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg


def check_protodown_rc(dut, protodown_rc):
    """
    check if specified protodown reason code is set
    """
    out = dut.vtysh_cmd("show evpn json")

    evpn_js = json.loads(out)
    tmp_rc = evpn_js.get("protodownReasons", [])

    if protodown_rc:
        if protodown_rc not in tmp_rc:
            return "protodown %s missing in %s" % (protodown_rc, tmp_rc)
    else:
        if tmp_rc:
            return "unexpected protodown rc %s" % (tmp_rc)

    return None
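
# check_protodown_rc(dut, None) passes only when no protodown reason is set on
# the EVPN-MH access ports; the uplink-tracking test below first waits for the
# startup-delay reason to clear and then expects "uplinkDown" once both
# uplinks are disabled.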


def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """

    tgen = get_topogen()

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    # wait for protodown rc to clear after startup
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # disable the uplinks
    dut.run("ip link set %s-eth0 down" % dut_name)
    dut.run("ip link set %s-eth1 down" % dut_name)

    # check if the access ports have been protodowned
    test_fn = partial(check_protodown_rc, dut, "uplinkDown")
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # enable the uplinks
    dut.run("ip link set %s-eth0 up" % dut_name)
    dut.run("ip link set %s-eth1 up" % dut_name)

    # check if the access ports have been moved out of protodown
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg


if __name__ == "__main__":
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))