# tests/topotests/bgp_evpn_mh/test_evpn_mh.py
# Copyright (c) 2020 by
# Cumulus Networks, Inc.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
"""
test_evpn_mh.py: Testing EVPN multihoming
"""
import os
import sys
import json
import platform
from functools import partial

import pytest

# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))

# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger

# Required to instantiate the topology builder class.
from mininet.topo import Topo

# Marks applied to every test in this module.
# NOTE: pytestmark was previously assigned twice with the identical value;
# the duplicate assignment was redundant and has been removed.
pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
56 #####################################################
58 ## Network Topology Definition
60 ## See topology picture at evpn-mh-topo-tests.pdf
61 #####################################################
class NetworkTopo(Topo):
    """
    EVPN Multihoming Topology -
    1. Two spine switches - spine1, spine2
    2. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    3. Two dual attached hosts per-rack - hostdx1, hostdx2
    """

    def build(self, **_opts):
        "Build the EVPN-MH topology"

        tgen = get_topogen(self)

        # spines, TORs and dual-attached hosts
        tgen.add_router("spine1")
        tgen.add_router("spine2")
        tgen.add_router("torm11")
        tgen.add_router("torm12")
        tgen.add_router("torm21")
        tgen.add_router("torm22")
        tgen.add_router("hostd11")
        tgen.add_router("hostd12")
        tgen.add_router("hostd21")
        tgen.add_router("hostd22")

        # First switch is for a dummy interface (for local network)

        ##################### spine1 ########################
        # spine1-eth0 is connected to torm11-eth0
        switch = tgen.add_switch("sw1")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm11"])

        # spine1-eth1 is connected to torm12-eth0
        switch = tgen.add_switch("sw2")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm12"])

        # spine1-eth2 is connected to torm21-eth0
        switch = tgen.add_switch("sw3")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm21"])

        # spine1-eth3 is connected to torm22-eth0
        switch = tgen.add_switch("sw4")
        switch.add_link(tgen.gears["spine1"])
        switch.add_link(tgen.gears["torm22"])

        ##################### spine2 ########################
        # spine2-eth0 is connected to torm11-eth1
        switch = tgen.add_switch("sw5")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm11"])

        # spine2-eth1 is connected to torm12-eth1
        switch = tgen.add_switch("sw6")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm12"])

        # spine2-eth2 is connected to torm21-eth1
        switch = tgen.add_switch("sw7")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm21"])

        # spine2-eth3 is connected to torm22-eth1
        switch = tgen.add_switch("sw8")
        switch.add_link(tgen.gears["spine2"])
        switch.add_link(tgen.gears["torm22"])

        ##################### torm11 ########################
        # torm11-eth2 is connected to hostd11-eth0
        switch = tgen.add_switch("sw9")
        switch.add_link(tgen.gears["torm11"])
        switch.add_link(tgen.gears["hostd11"])

        # torm11-eth3 is connected to hostd12-eth0
        switch = tgen.add_switch("sw10")
        switch.add_link(tgen.gears["torm11"])
        switch.add_link(tgen.gears["hostd12"])

        ##################### torm12 ########################
        # torm12-eth2 is connected to hostd11-eth1
        switch = tgen.add_switch("sw11")
        switch.add_link(tgen.gears["torm12"])
        switch.add_link(tgen.gears["hostd11"])

        # torm12-eth3 is connected to hostd12-eth1
        switch = tgen.add_switch("sw12")
        switch.add_link(tgen.gears["torm12"])
        switch.add_link(tgen.gears["hostd12"])

        ##################### torm21 ########################
        # torm21-eth2 is connected to hostd21-eth0
        switch = tgen.add_switch("sw13")
        switch.add_link(tgen.gears["torm21"])
        switch.add_link(tgen.gears["hostd21"])

        # torm21-eth3 is connected to hostd22-eth0
        switch = tgen.add_switch("sw14")
        switch.add_link(tgen.gears["torm21"])
        switch.add_link(tgen.gears["hostd22"])

        ##################### torm22 ########################
        # torm22-eth2 is connected to hostd21-eth1
        switch = tgen.add_switch("sw15")
        switch.add_link(tgen.gears["torm22"])
        switch.add_link(tgen.gears["hostd21"])

        # torm22-eth3 is connected to hostd22-eth1
        switch = tgen.add_switch("sw16")
        switch.add_link(tgen.gears["torm22"])
        switch.add_link(tgen.gears["hostd22"])
#####################################################
## Address/identity maps used by the checker helpers
#####################################################

# VTEP (VxLAN tunnel) source IPs, one per TOR
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# per-TOR unique SVI addresses on VLAN-1000
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

# rack membership, used to derive expected ES peer/remote VTEP lists
tor_ips_rack_1 = {"torm11": "192.168.100.15", "torm12": "192.168.100.16"}

tor_ips_rack_2 = {"torm21": "192.168.100.17", "torm22": "192.168.100.18"}

# type-3 ESI per dual-attached host bond
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Used to setup bonds on the TORs and hosts for MH.

    Creates an 802.3ad (LACP) bond named bond_name, enslaves the given
    members and, when br is truthy, attaches the bond to the VLAN-aware
    bridge with VLAN-1000 as untagged/PVID.
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run(
        "ip link set dev %s type bond ad_actor_system %s" % (bond_name, bond_ad_sys_mac)
    )

    # members must be down before they can be enslaved
    for member in bond_members:
        node.run("ip link set dev %s down" % member)
        node.run("ip link set dev %s master %s" % (member, bond_name))
        node.run("ip link set dev %s up" % member)

    node.run("ip link set dev %s up" % bond_name)

    # if bridge is specified add the bond as a bridge member
    if br:
        node.run(" ip link set dev %s master bridge" % bond_name)
        node.run("/sbin/bridge link set dev %s priority 8" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s" % bond_name)
def config_mcast_tunnel_termination_device(node):
    """
    The kernel requires a device to terminate VxLAN multicast tunnels
    when EVPN-PIM is used for flooded traffic; create that dummy device.
    """
    node.run("ip link add dev ipmr-lo type dummy")
    node.run("ip link set dev ipmr-lo mtu 16000")
    node.run("ip link set dev ipmr-lo mode dormant")
    node.run("ip link set dev ipmr-lo up")
def config_bridge(node):
    """
    Create a VLAN aware bridge (named "bridge") and enable VLAN-1000 on it.
    """
    node.run("ip link add dev bridge type bridge stp_state 0")
    node.run("ip link set dev bridge type bridge vlan_filtering 1")
    node.run("ip link set dev bridge mtu 9216")
    node.run("ip link set dev bridge type bridge ageing_time 1800")
    node.run("ip link set dev bridge type bridge mcast_snooping 0")
    node.run("ip link set dev bridge type bridge vlan_stats_enabled 1")
    node.run("ip link set dev bridge up")
    node.run("/sbin/bridge vlan add vid 1000 dev bridge")
def config_vxlan(node, node_ip):
    """
    Create a VxLAN device for VNI 1000 and add it to the bridge.
    VLAN-1000 is mapped to VNI-1000.

    node_ip is used as the local VTEP source address; BUM traffic is
    flooded via multicast group 239.1.1.100 terminated on ipmr-lo.
    """
    node.run("ip link add dev vx-1000 type vxlan id 1000 dstport 4789")
    node.run("ip link set dev vx-1000 type vxlan nolearning")
    node.run("ip link set dev vx-1000 type vxlan local %s" % node_ip)
    node.run("ip link set dev vx-1000 type vxlan ttl 64")
    node.run("ip link set dev vx-1000 mtu 9152")
    node.run("ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100")
    node.run("ip link set dev vx-1000 up")

    # enslave to the bridge and map VLAN-1000 as untagged/PVID
    node.run("ip link set dev vx-1000 master bridge")
    node.run("/sbin/bridge link set dev vx-1000 neigh_suppress on")
    node.run("/sbin/bridge link set dev vx-1000 learning off")
    node.run("/sbin/bridge link set dev vx-1000 priority 8")
    node.run("/sbin/bridge vlan del vid 1 dev vx-1000")
    node.run("/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 dev vx-1000")
    node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000")
def config_svi(node, svi_pip):
    """
    Create an SVI for VLAN 1000.

    svi_pip is the per-PE unique address; 45.0.0.1/24 on the vlan1000-v0
    macvlan is the anycast gateway shared by all PEs.
    """
    node.run("ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q")
    node.run("ip addr add %s/24 dev vlan1000" % svi_pip)
    node.run("ip link set dev vlan1000 up")
    node.run("/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1")
    node.run("ip link add link vlan1000 name vlan1000-v0 type macvlan mode private")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits")
    node.run("/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0")
    node.run("ip link set dev vlan1000-v0 address 00:00:5e:00:01:01")
    node.run("ip link set dev vlan1000-v0 up")
    # metric 1024 is not working
    node.run("ip addr add 45.0.0.1/24 dev vlan1000-v0")
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Create the bond/vxlan-bridge on the TOR which acts as VTEP and EPN-PE.
    """
    # create a device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

    # create a vlan aware bridge
    config_bridge(tor)

    # create vxlan device and add it to bridge
    config_vxlan(tor, tor_ip)

    # create hostbonds and add them to the bridge; rack-1 and rack-2 TORs
    # use different LACP system MACs so each rack forms its own ES
    if "torm1" in tor_name:
        sys_mac = "44:38:39:ff:ff:01"
    else:
        sys_mac = "44:38:39:ff:ff:02"

    bond_member = tor_name + "-eth2"
    config_bond(tor, "hostbond1", [bond_member], sys_mac, "bridge")

    bond_member = tor_name + "-eth3"
    config_bond(tor, "hostbond2", [bond_member], sys_mac, "bridge")

    # create the SVI (incl. anycast gateway)
    config_svi(tor, svi_pip)
def config_tors(tgen, tors):
    """Provision every TOR named in tors as a VTEP/EVPN-PE."""
    for name in tors:
        node = tgen.gears[name]
        config_tor(name, node, tor_ips.get(name), svi_ips.get(name))
def compute_host_ip_mac(host_name):
    """
    Derive the host's IP (with /24 prefix) and MAC from its name.

    "hostdNN" maps to 45.0.0.NN/24 and 00:00:00:00:00:NN.
    """
    suffix = host_name.split("hostd")[1]
    return "45.0.0." + suffix + "/24", "00:00:00:00:00:" + suffix
def config_host(host_name, host):
    """
    Create the dual-attached bond on host nodes for MH.

    Both host uplinks (eth0/eth1) are enslaved into "torbond" and the
    host's derived IP/MAC is applied to the bond.
    """
    bond_name = "torbond"
    bond_members = [host_name + "-eth0", host_name + "-eth1"]
    config_bond(host, bond_name, bond_members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
def config_hosts(tgen, hosts):
    """Provision the MH bond on every host named in hosts."""
    for name in hosts:
        config_host(name, tgen.gears[name])
def setup_module(module):
    "Build the topology, provision TORs/hosts and start the routers"

    tgen = Topogen(NetworkTopo, module.__name__)
    tgen.start_topology()

    # EVPN-MH needs bond/bridge features only present in newer kernels
    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    tors = ["torm11", "torm12", "torm21", "torm22"]
    config_tors(tgen, tors)

    hosts = ["hostd11", "hostd12", "hostd21", "hostd22"]
    config_hosts(tgen, hosts)

    # This is a sample of configuration loading.
    router_list = tgen.routers()
    for rname, router in router_list.items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_PIM, os.path.join(CWD, "{}/pim.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/evpn.conf".format(rname))
        )
    tgen.start_router()
def teardown_module(_mod):
    "Teardown the pytest environment"

    # This function tears down the whole topology.
    tgen = get_topogen()
    tgen.stop_topology()
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check if ES peers are setup correctly on local ESs.

    Returns (esi, diff) when the advertised VTEP set differs from the
    expected rack peers (minus any known-down VTEPs), else None.
    """
    # the expected peers are the other TORs in the dut's own rack
    rack = tor_ips_rack_1 if "torm1" in dut_name else tor_ips_rack_2

    peer_ips = []
    for tor_name, tor_ip in rack.items():
        if dut_name not in tor_name:
            peer_ips.append(tor_ip)

    # remove down VTEPs from the peer check list
    expected = set(peer_ips) - set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))

    return (esi, diff) if diff else None
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify list of PEs associated with a remote ES.

    Returns (esi, diff) when the advertised VTEP set differs from the
    expected remote-rack TORs (minus any known-down VTEPs), else None.
    """
    # a remote ES lives on the other rack's TORs
    rack = tor_ips_rack_2 if "torm1" in dut_name else tor_ips_rack_1

    remote_ips = []
    for _tor_name, tor_ip in rack.items():
        remote_ips.append(tor_ip)

    # remove down VTEPs from the remote check list
    expected = set(remote_ips) - set(down_vteps)

    diff = expected.symmetric_difference(set(vtep_ips))

    return (esi, diff) if diff else None
def check_es(dut):
    """
    Verify list of PEs associated all ESs, local and remote.

    Returns None on success, or the offending diff for run_and_expect.
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es json")
    bgp_es_json = json.loads(bgp_es)

    result = None

    expected_es_set = set([v for k, v in host_es_map.items()])
    curr_es_set = []

    # check is ES content is correct
    for es in bgp_es_json:
        esi = es["esi"]
        curr_es_set.append(esi)
        types = es["type"]

        vtep_ips = []
        for vtep in es.get("vteps", []):
            vtep_ips.append(vtep["vtep_ip"])

        if "local" in types:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # check if all ESs are present
    curr_es_set = set(curr_es_set)
    result = curr_es_set.symmetric_difference(expected_es_set)

    return result if result else None
def check_one_es(dut, esi, down_vteps):
    """
    Verify the VTEP list of one ES (local or remote), allowing for the
    VTEPs listed in down_vteps to be absent.
    """
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi)
    es = json.loads(bgp_es)

    if not es:
        return "esi %s not found" % esi

    types = es["type"]
    vtep_ips = []
    for vtep in es.get("vteps", []):
        vtep_ips.append(vtep["vtep_ip"])

    if "local" in types:
        return check_local_es(esi, vtep_ips, dut.name, down_vteps)

    return check_remote_es(esi, vtep_ips, dut.name, down_vteps)
def test_evpn_es():
    """
    Two ES are setup on each rack. This test checks if -
    1. ES peer has been added to the local ES (via Type-1/EAD route)
    2. The remote ESs are setup with the right list of PEs (via Type-1)
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    dut = tgen.gears[dut_name]
    test_fn = partial(check_es, dut)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)

    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg
def test_evpn_ead_update():
    """
    Flap a host link one the remote rack and check if the EAD updates
    are sent/processed for the corresponding ESI.
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # dut on rack1 and host link flap on rack2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]

    host_name = "hostd21"
    host = tgen.gears[host_name]
    esi = host_es_map.get(host_name)

    # check if the VTEP list is right to start with
    down_vteps = []
    test_fn = partial(check_one_es, dut, esi, down_vteps)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES content incorrect'.format(dut_name)
    assert result is None, assertmsg

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    down_vteps.append(tor_ips.get(remote_tor_name))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link down'.format(dut_name)
    assert result is None, assertmsg

    # bring up remote host link and check if the EAD update is rxed
    down_vteps.remove(tor_ips.get(remote_tor_name))
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" ES incorrect after remote link flap'.format(dut_name)
    assert result is None, assertmsg
def ping_anycast_gw(tgen):
    # ping the anycast gw from the local and remote hosts to populate
    # the mac address on the PEs
    script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
    intf = "torbond"
    ipaddr = "45.0.0.1"  # anycast GW address configured by config_svi
    ping_cmd = [
        script_path,
        "--imports=Ether,ARP",
        "--interface=" + intf,
        "'Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=\"{}\")'".format(ipaddr),
    ]
    for name in ("hostd11", "hostd21"):
        host = tgen.net[name]
        stdout = host.cmd(ping_cmd)
        stdout = stdout.strip()
        if stdout:
            host.logger.debug(
                "%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout
            )
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
    """
    Check if mac is present on dut and if its destination matches the
    provided attributes (type, esi and - for local MACs - interface).

    Returns None on success or an error string for run_and_expect retries.
    """
    if ping_gw:
        # trigger MAC learning on the PEs via an ARP to the anycast GW
        ping_anycast_gw(tgen)

    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    for mac_key, info in mac_js.items():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        # only local MACs carry an access interface
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        # BUG FIX: the original condition compared "intf == intf" (always
        # true), so tmp_intf was computed but never checked and a MAC
        # pointing at the wrong access port was accepted.
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check if the MAC is synced between
    torm11 and torm12. And installed as a local MAC.
    2. Add a MAC on hostd21 and check if the MAC is installed as a
    remote MAC on torm11 and torm12
    """
    tgen = get_topogen()

    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]
    tors = []
    tors.append(tgen.gears["torm11"])
    tors.append(tgen.gears["torm12"])

    vni = 1000

    # check if the rack-1 host MAC is present on all rack-1 PEs
    # and points to local access port
    m_type = "local"
    _, mac = compute_host_ip_mac(local_host.name)
    esi = host_es_map.get(local_host.name)
    intf = "hostbond1"  # hostd11 hangs off hostbond1 on both rack-1 TORs

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf, True, tgen)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" local MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg

    # check if the rack-2 host MAC is present on all rack-1 PEs
    # and points to the remote ES destination
    m_type = "remote"
    _, mac = compute_host_ip_mac(remote_host.name)
    esi = host_es_map.get(remote_host.name)
    intf = ""  # remote MACs have no local access interface

    for tor in tors:
        test_fn = partial(check_mac, tor, vni, mac, m_type, esi, intf)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" remote MAC content incorrect'.format(tor.name)
        assert result is None, assertmsg
def check_df_role(dut, esi, role):
    """
    Return error string if the df role on the dut is different from the
    expected one; None when it matches.
    """
    es_json = dut.vtysh_cmd("show evpn es %s json" % esi)
    es = json.loads(es_json)

    if not es:
        return "esi %s not found" % esi

    flags = es.get("flags", [])
    curr_role = "nonDF" if "nonDF" in flags else "DF"

    if curr_role != role:
        return "%s is %s for %s" % (dut.name, curr_role, esi)

    return None
def test_evpn_df():
    """
    1. Check the DF role on all the PEs on rack-1.
    2. Increase the DF preference on the non-DF and check if it becomes
    the new DF winner.
    """

    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # We will run the tests on just one ES
    esi = host_es_map.get("hostd11")
    intf = "hostbond1"

    tors = []
    tors.append(tgen.gears["torm11"])
    tors.append(tgen.gears["torm12"])
    df_node = "torm11"

    # check roles on rack-1
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg

    # change df preference on the nonDF to make it the df
    torm12 = tgen.gears["torm12"]
    torm12.vtysh_cmd("conf\ninterface %s\nevpn mh es-df-pref %d" % (intf, 60000))
    df_node = "torm12"

    # re-check roles on rack-1; we should have a new winner
    for tor in tors:
        role = "DF" if tor.name == df_node else "nonDF"
        test_fn = partial(check_df_role, tor, esi, role)
        _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
        assertmsg = '"{}" DF role incorrect'.format(tor.name)
        assert result is None, assertmsg
def check_protodown_rc(dut, protodown_rc):
    """
    Check if the specified protodown reason code is set.

    With protodown_rc None, verifies that NO reason codes are set.
    Returns None on success or an error string for run_and_expect.
    """
    out = dut.vtysh_cmd("show evpn json")

    evpn_js = json.loads(out)
    tmp_rc = evpn_js.get("protodownReasons", [])

    if protodown_rc:
        if protodown_rc not in tmp_rc:
            return "protodown %s missing in %s" % (protodown_rc, tmp_rc)
    else:
        if tmp_rc:
            return "unexpected protodown rc %s" % (tmp_rc)

    return None
def test_evpn_uplink_tracking():
    """
    1. Wait for access ports to come out of startup-delay
    2. disable uplinks and check if access ports have been protodowned
    3. enable uplinks and check if access ports have been moved out
    of protodown
    """

    tgen = get_topogen()

    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    # wait for protodown rc to clear after startup
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # disable the uplinks
    dut.run("ip link set %s-eth0 down" % dut_name)
    dut.run("ip link set %s-eth1 down" % dut_name)

    # check if the access ports have been protodowned
    test_fn = partial(check_protodown_rc, dut, "uplinkDown")
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg

    # re-enable the uplinks
    dut.run("ip link set %s-eth0 up" % dut_name)
    dut.run("ip link set %s-eth1 up" % dut_name)

    # check if the access ports have been moved out of protodown
    test_fn = partial(check_protodown_rc, dut, None)
    _, result = topotest.run_and_expect(test_fn, None, count=20, wait=3)
    assertmsg = '"{}" protodown rc incorrect'.format(dut_name)
    assert result is None, assertmsg
if __name__ == "__main__":
    # run this module directly through pytest, keeping stdout visible
    args = ["-s"] + sys.argv[1:]
    sys.exit(pytest.main(args))