]>
Commit | Line | Data |
---|---|---|
df98b92c AK |
1 | #!/usr/bin/env python |
2 | ||
3 | # | |
4 | # test_evpn_mh.py | |
5 | # | |
6 | # Copyright (c) 2020 by | |
7 | # Cumulus Networks, Inc. | |
8 | # Anuradha Karuppiah | |
9 | # | |
10 | # Permission to use, copy, modify, and/or distribute this software | |
11 | # for any purpose with or without fee is hereby granted, provided | |
12 | # that the above copyright notice and this permission notice appear | |
13 | # in all copies. | |
14 | # | |
15 | # THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES | |
16 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
17 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR | |
18 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY | |
19 | # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, | |
20 | # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS | |
21 | # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | |
22 | # OF THIS SOFTWARE. | |
23 | # | |
24 | ||
25 | """ | |
26 | test_evpn_mh.py: Testing EVPN multihoming | |
27 | ||
28 | """ | |
29 | ||
30 | import os | |
31 | import re | |
32 | import sys | |
33 | import pytest | |
34 | import json | |
35 | import platform | |
36 | from functools import partial | |
37 | ||
38 | # Save the Current Working Directory to find configuration files. | |
39 | CWD = os.path.dirname(os.path.realpath(__file__)) | |
40 | sys.path.append(os.path.join(CWD, "../")) | |
41 | ||
42 | # pylint: disable=C0413 | |
43 | # Import topogen and topotest helpers | |
44 | from lib import topotest | |
45 | from lib.topogen import Topogen, TopoRouter, get_topogen | |
46 | from lib.topolog import logger | |
47 | ||
48 | # Required to instantiate the topology builder class. | |
49 | from mininet.topo import Topo | |
50 | ||
51 | ##################################################### | |
52 | ## | |
53 | ## Network Topology Definition | |
54 | ## | |
55 | ## See topology picture at evpn-mh-topo-tests.pdf | |
56 | ##################################################### | |
57 | ||
58 | ||
class NetworkTopo(Topo):
    """
    EVPN Multihoming Topology -
    1. Two level CLOS
    2. Two spine switches - spine1, spine2
    3. Two racks with Top-of-Rack switches per rack - tormx1, tormx2
    4. Two dual attached hosts per-rack - hostdx1, hostdx2
    See topology picture at evpn-mh-topo-tests.pdf
    """

    def build(self, **_opts):
        "Build function"

        tgen = get_topogen(self)

        # routers: 2 spines, 4 TORs (2 per rack), 4 dual-attached hosts
        for rtr in (
            "spine1", "spine2",
            "torm11", "torm12", "torm21", "torm22",
            "hostd11", "hostd12", "hostd21", "hostd22",
        ):
            tgen.add_router(rtr)

        # Point-to-point links, each modelled as a two-port switch.
        # ethN numbering on every node follows the order of this table
        # (e.g. spine1-eth0 connects to torm11-eth0, ...).
        p2p_links = (
            # spine1 eth0-3 down to each TOR
            ("spine1", "torm11"),
            ("spine1", "torm12"),
            ("spine1", "torm21"),
            ("spine1", "torm22"),
            # spine2 eth0-3 down to each TOR
            ("spine2", "torm11"),
            ("spine2", "torm12"),
            ("spine2", "torm21"),
            ("spine2", "torm22"),
            # rack-1 TORs eth2-3 down to the rack-1 hosts
            ("torm11", "hostd11"),
            ("torm11", "hostd12"),
            ("torm12", "hostd11"),
            ("torm12", "hostd12"),
            # rack-2 TORs eth2-3 down to the rack-2 hosts
            ("torm21", "hostd21"),
            ("torm21", "hostd22"),
            ("torm22", "hostd21"),
            ("torm22", "hostd22"),
        )
        for num, (end1, end2) in enumerate(p2p_links, start=1):
            switch = tgen.add_switch("sw%d" % num)
            switch.add_link(tgen.gears[end1])
            switch.add_link(tgen.gears[end2])
174 | ||
175 | ##################################################### | |
176 | ## | |
177 | ## Tests starting | |
178 | ## | |
179 | ##################################################### | |
180 | ||
# loopback/VTEP IP of each TOR
tor_ips = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# unique SVI (VLAN-1000) address of each TOR
svi_ips = {
    "torm11": "45.0.0.2",
    "torm12": "45.0.0.3",
    "torm21": "45.0.0.4",
    "torm22": "45.0.0.5",
}

# rack-1 TOR VTEP IPs (potential ES peers for rack-1 DUTs)
tor_ips_rack_1 = {
    "torm11": "192.168.100.15",
    "torm12": "192.168.100.16",
}

# rack-2 TOR VTEP IPs (potential ES peers for rack-2 DUTs)
tor_ips_rack_2 = {
    "torm21": "192.168.100.17",
    "torm22": "192.168.100.18",
}

# ESI of the bond each dual-attached host hangs off
host_es_map = {
    "hostd11": "03:44:38:39:ff:ff:01:00:00:01",
    "hostd12": "03:44:38:39:ff:ff:01:00:00:02",
    "hostd21": "03:44:38:39:ff:ff:02:00:00:01",
    "hostd22": "03:44:38:39:ff:ff:02:00:00:02",
}
201 | ||
def config_bond(node, bond_name, bond_members, bond_ad_sys_mac, br):
    """
    Create an 802.3ad (LACP) bond on a TOR or host node for MH.

    Parameters:
        node            - topotest node to run the commands on
        bond_name       - name of the bond device to create
        bond_members    - list of member interface names to enslave
        bond_ad_sys_mac - LACP actor system MAC (ES identity on the TORs)
        br              - bridge device name to attach the bond to, or
                          None/"" to leave the bond standalone (hosts).
                          Generalized from a truthy flag that always
                          attached to the device named "bridge".
    """
    node.run("ip link add dev %s type bond mode 802.3ad" % bond_name)
    node.run("ip link set dev %s type bond lacp_rate 1" % bond_name)
    node.run("ip link set dev %s type bond miimon 100" % bond_name)
    node.run("ip link set dev %s type bond xmit_hash_policy layer3+4" % bond_name)
    node.run("ip link set dev %s type bond min_links 1" % bond_name)
    node.run("ip link set dev %s type bond ad_actor_system %s" %
             (bond_name, bond_ad_sys_mac))

    # members must be down before they can be enslaved
    for bond_member in bond_members:
        node.run("ip link set dev %s down" % bond_member)
        node.run("ip link set dev %s master %s" % (bond_member, bond_name))
        node.run("ip link set dev %s up" % bond_member)

    node.run("ip link set dev %s up" % bond_name)

    # if a bridge is specified add the bond as a bridge member and move
    # it from the default VLAN-1 to VLAN-1000 (untagged/PVID)
    if br:
        node.run("ip link set dev %s master %s" % (bond_name, br))
        node.run("/sbin/bridge link set dev %s priority 8" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 dev %s" % bond_name)
        node.run("/sbin/bridge vlan del vid 1 untagged pvid dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 dev %s" % bond_name)
        node.run("/sbin/bridge vlan add vid 1000 untagged pvid dev %s"
                 % bond_name)
230 | ||
231 | ||
def config_mcast_tunnel_termination_device(node):
    """
    Create the dummy device (ipmr-lo) the kernel needs to terminate
    VxLAN multicast tunnels when EVPN-PIM is used for flooded traffic.
    """
    for cmd in (
        "ip link add dev ipmr-lo type dummy",
        "ip link set dev ipmr-lo mtu 16000",
        "ip link set dev ipmr-lo mode dormant",
        "ip link set dev ipmr-lo up",
    ):
        node.run(cmd)
241 | ||
242 | ||
def config_bridge(node):
    """
    Create the VLAN-aware bridge ("bridge") that hosts the VxLAN device
    and the host bonds; MAC learning stays on, IGMP snooping is off.
    """
    for cmd in (
        "ip link add dev bridge type bridge stp_state 0",
        "ip link set dev bridge type bridge vlan_filtering 1",
        "ip link set dev bridge mtu 9216",
        "ip link set dev bridge type bridge ageing_time 1800",
        "ip link set dev bridge type bridge mcast_snooping 0",
        "ip link set dev bridge type bridge vlan_stats_enabled 1",
        "ip link set dev bridge up",
        "/sbin/bridge vlan add vid 1000 dev bridge",
    ):
        node.run(cmd)
255 | ||
256 | ||
def config_vxlan(node, node_ip):
    """
    Create the VxLAN device for VNI-1000 and add it to the bridge;
    VLAN-1000 is mapped to VNI-1000 via the untagged/PVID settings.
    node_ip is used as the local tunnel address.
    """
    # device creation; BUM traffic uses the 239.1.1.100 multicast group
    # terminated on ipmr-lo (EVPN-PIM)
    for cmd in (
        "ip link add dev vx-1000 type vxlan id 1000 dstport 4789",
        "ip link set dev vx-1000 type vxlan nolearning",
        "ip link set dev vx-1000 type vxlan local %s" % node_ip,
        "ip link set dev vx-1000 type vxlan ttl 64",
        "ip link set dev vx-1000 mtu 9152",
        "ip link set dev vx-1000 type vxlan dev ipmr-lo group 239.1.1.100",
        "ip link set dev vx-1000 up",
    ):
        node.run(cmd)

    # bridge attrs: no local learning (BGP EVPN owns the MAC table),
    # ARP/ND suppression on, move from VLAN-1 to VLAN-1000
    for cmd in (
        "ip link set dev vx-1000 master bridge",
        "/sbin/bridge link set dev vx-1000 neigh_suppress on",
        "/sbin/bridge link set dev vx-1000 learning off",
        "/sbin/bridge link set dev vx-1000 priority 8",
        "/sbin/bridge vlan del vid 1 dev vx-1000",
        "/sbin/bridge vlan del vid 1 untagged pvid dev vx-1000",
        "/sbin/bridge vlan add vid 1000 dev vx-1000",
        "/sbin/bridge vlan add vid 1000 untagged pvid dev vx-1000",
    ):
        node.run(cmd)
279 | ||
280 | ||
def config_svi(node, svi_pip):
    """
    Create the SVI for VLAN 1000 plus the anycast-gateway macvlan.
    svi_pip is the TOR's unique SVI address; 45.0.0.1/00:00:5e:00:01:01
    is the gateway address/MAC shared by all TORs.
    """
    # per-TOR SVI
    for cmd in (
        "ip link add link bridge name vlan1000 type vlan id 1000 protocol 802.1q",
        "ip addr add %s/24 dev vlan1000" % svi_pip,
        "ip link set dev vlan1000 up",
        "/sbin/sysctl net.ipv4.conf.vlan1000.arp_accept=1",
    ):
        node.run(cmd)

    # anycast GW on a private-mode macvlan; IPv6 DAD must be disabled
    # because every TOR uses the same address/MAC
    for cmd in (
        "ip link add link vlan1000 name vlan1000-v0 type macvlan mode private",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.accept_dad=0",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits",
        "/sbin/sysctl net.ipv6.conf.vlan1000-v0.dad_transmits=0",
        "ip link set dev vlan1000-v0 address 00:00:5e:00:01:01",
        "ip link set dev vlan1000-v0 up",
        # metric 1024 is not working
        "ip addr add 45.0.0.1/24 dev vlan1000-v0",
    ):
        node.run(cmd)
297 | ||
298 | ||
def config_tor(tor_name, tor, tor_ip, svi_pip):
    """
    Provision the bond/vxlan-bridge stack on a TOR which acts as the
    VTEP and EVPN-PE for the attached hosts.
    """
    # device for terminating VxLAN multicast tunnels
    config_mcast_tunnel_termination_device(tor)

    # VLAN-aware bridge
    config_bridge(tor)

    # VxLAN device enslaved to the bridge
    config_vxlan(tor, tor_ip)

    # Host bonds added to the bridge. The LACP system MAC doubles as
    # the ES identity and is shared by both TORs in a rack.
    sys_mac = "44:38:39:ff:ff:01" if "torm1" in tor_name else "44:38:39:ff:ff:02"
    config_bond(tor, "hostbond1", [tor_name + "-eth2"], sys_mac, "bridge")
    config_bond(tor, "hostbond2", [tor_name + "-eth3"], sys_mac, "bridge")

    # SVI + anycast gateway
    config_svi(tor, svi_pip)
325 | ||
326 | ||
def config_tors(tgen, tors):
    """Apply the full VTEP dataplane config to every TOR in the list."""
    for name in tors:
        config_tor(name, tgen.gears[name], tor_ips.get(name), svi_ips.get(name))
331 | ||
def compute_host_ip_mac(host_name):
    """
    Derive a host's IP ("45.0.0.<id>/24") and MAC ("00:00:00:00:00:<id>")
    from the numeric suffix of its name, e.g. "hostd11" -> id "11".
    """
    suffix = host_name.partition("hostd")[2]
    return "45.0.0.%s/24" % suffix, "00:00:00:00:00:%s" % suffix
338 | ||
def config_host(host_name, host):
    """
    Create the dual-attached bond ("torbond") on a host node for MH and
    assign it the host's derived IP address and MAC.
    """
    bond_name = "torbond"
    members = [host_name + "-eth0", host_name + "-eth1"]
    # hosts use a zero actor system MAC and no bridge
    config_bond(host, bond_name, members, "00:00:00:00:00:00", None)

    host_ip, host_mac = compute_host_ip_mac(host_name)
    host.run("ip addr add %s dev %s" % (host_ip, bond_name))
    host.run("ip link set dev %s address %s" % (bond_name, host_mac))
352 | ||
353 | ||
def config_hosts(tgen, hosts):
    """Configure the MH bond on every host in the list."""
    for name in hosts:
        config_host(name, tgen.gears[name])
358 | ||
359 | ||
def setup_module(module):
    """
    Build the topology, configure the Linux dataplane on TORs and hosts,
    load the FRR configs and start all routers. Skips the whole module
    on kernels older than 4.19 (required for EVPN-MH).
    """
    tgen = Topogen(NetworkTopo, module.__name__)
    tgen.start_topology()

    krel = platform.release()
    if topotest.version_cmp(krel, "4.19") < 0:
        tgen.errors = "kernel 4.19 needed for multihoming tests"
        pytest.skip(tgen.errors)

    tors = ["torm11", "torm12", "torm21", "torm22"]
    config_tors(tgen, tors)

    hosts = ["hostd11", "hostd12", "hostd21", "hostd22"]
    config_hosts(tgen, hosts)

    # load zebra/pim/bgp configs on every router.
    # NOTE: .items() instead of the Python-2-only .iteritems() so the
    # test also runs under Python 3.
    router_list = tgen.routers()
    for rname, router in router_list.items():
        router.load_config(
            TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_PIM, os.path.join(CWD, "{}/pim.conf".format(rname))
        )
        router.load_config(
            TopoRouter.RD_BGP, os.path.join(CWD, "{}/evpn.conf".format(rname))
        )
    tgen.start_router()
399 | ||
400 | ||
def teardown_module(_mod):
    "Teardown the pytest environment"
    # tears down the whole topology
    get_topogen().stop_topology()
407 | ||
408 | ||
def check_local_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Check that a local ES lists exactly the expected ES peers: the other
    TOR(s) in the DUT's rack, minus any VTEPs known to be down.

    Returns None on success, or (esi, diff) describing the mismatch.
    """
    peer_ips = []
    # the DUT's rack determines which TORs can be its ES peers
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_1
    else:
        tor_ips_rack = tor_ips_rack_2

    # .items() instead of Python-2-only .iteritems() for Py3 compat
    for tor_name, tor_ip in tor_ips_rack.items():
        if dut_name not in tor_name:
            peer_ips.append(tor_ip)

    # remove down VTEPs from the peer check list
    peer_set = set(peer_ips) - set(down_vteps)

    # any element present on only one side is a mismatch
    diff = peer_set.symmetric_difference(set(vtep_ips))

    return (esi, diff) if diff else None
432 | ||
433 | ||
def check_remote_es(esi, vtep_ips, dut_name, down_vteps):
    """
    Verify the list of PEs associated with a remote ES: all TORs in the
    *other* rack, minus any VTEPs known to be down.

    Returns None on success, or (esi, diff) describing the mismatch.
    """
    # the remote rack is the one the DUT is NOT in
    if "torm1" in dut_name:
        tor_ips_rack = tor_ips_rack_2
    else:
        tor_ips_rack = tor_ips_rack_1

    # .values() — the TOR name was never used; also avoids the
    # Python-2-only .iteritems()
    remote_ips = list(tor_ips_rack.values())

    # remove down VTEPs from the remote check list
    remote_set = set(remote_ips) - set(down_vteps)

    # any element present on only one side is a mismatch
    diff = remote_set.symmetric_difference(set(vtep_ips))

    return (esi, diff) if diff else None
457 | ||
def check_es(dut):
    """
    Verify the PE lists of all ESs on the DUT, local and remote, and
    that exactly the expected set of ESs is present in BGP.

    Returns None on success, or a truthy value describing the mismatch.
    """
    # "l2vp" is an unambiguous vtysh abbreviation of "l2vpn"
    bgp_es = dut.vtysh_cmd("show bgp l2vp evpn es json")
    bgp_es_json = json.loads(bgp_es)

    result = None

    # set(d.values()) instead of the Python-2-only .iteritems() loop
    expected_es_set = set(host_es_map.values())
    curr_es_set = []

    # check if ES content is correct
    for es in bgp_es_json:
        esi = es["esi"]
        curr_es_set.append(esi)
        types = es["type"]
        vtep_ips = [vtep["vtep_ip"] for vtep in es["vteps"]]

        if "local" in types:
            result = check_local_es(esi, vtep_ips, dut.name, [])
        else:
            result = check_remote_es(esi, vtep_ips, dut.name, [])

        if result:
            return result

    # check if all ESs are present
    result = set(curr_es_set).symmetric_difference(expected_es_set)

    return result if result else None
492 | ||
def check_one_es(dut, esi, down_vteps):
    """
    Verify the PE/VTEP list of a single ES on the DUT (local or remote),
    ignoring any VTEPs listed in down_vteps.
    """
    out = dut.vtysh_cmd("show bgp l2vp evpn es %s json" % esi)
    es = json.loads(out)

    if not es:
        return "esi %s not found" % esi

    vtep_ips = [vtep["vtep_ip"] for vtep in es["vteps"]]

    if "local" in es["type"]:
        return check_local_es(es["esi"], vtep_ips, dut.name, down_vteps)
    return check_remote_es(es["esi"], vtep_ips, dut.name, down_vteps)
515 | ||
def test_evpn_es():
    """
    Two ESs are set up on each rack. This test checks that -
    1. the ES peer has been added to the local ES (via Type-1/EAD routes)
    2. remote ESs carry the right list of PEs (via Type-1 routes)
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    dut_name = "torm11"
    checker = partial(check_es, tgen.gears[dut_name])
    # poll: EAD route propagation takes a few seconds
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)

    assert result is None, '"{}" ES content incorrect'.format(dut_name)
536 | ||
def test_evpn_ead_update():
    """
    Flap a host link on the remote rack and check that the EAD updates
    are sent/processed for the corresponding ESI.
    """
    tgen = get_topogen()

    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # DUT is on rack-1; the host link flap happens on rack-2
    dut_name = "torm11"
    dut = tgen.gears[dut_name]

    remote_tor_name = "torm21"
    remote_tor = tgen.gears[remote_tor_name]

    host_name = "hostd21"
    host = tgen.gears[host_name]
    esi = host_es_map.get(host_name)

    # baseline: the VTEP list must be correct to start with
    down_vteps = []
    checker = partial(check_one_es, dut, esi, down_vteps)
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
    assert result is None, '"{}" ES content incorrect'.format(dut_name)

    # down a remote host link and check if the EAD withdraw is rxed
    # Note: LACP is not working as expected so I am temporarily shutting
    # down the link on the remote TOR instead of the remote host
    remote_tor.run("ip link set dev %s-%s down" % (remote_tor_name, "eth2"))
    # checker shares this list, so mutating it updates the expectation
    down_vteps.append(tor_ips.get(remote_tor_name))
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
    assert result is None, '"{}" ES incorrect after remote link down'.format(dut_name)

    # bring the remote host link back up and check if the EAD update is rxed
    down_vteps.remove(tor_ips.get(remote_tor_name))
    remote_tor.run("ip link set dev %s-%s up" % (remote_tor_name, "eth2"))
    _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
    assert result is None, '"{}" ES incorrect after remote link flap'.format(dut_name)
582 | ||
def check_mac(dut, vni, mac, m_type, esi, intf):
    """
    Check that the MAC is present in the VNI's EVPN MAC table on the DUT
    with the expected type ("local"/"remote"), ESI and, for local MACs,
    access interface.

    Returns None on success or an error string on mismatch.
    """
    out = dut.vtysh_cmd("show evpn mac vni %d mac %s json" % (vni, mac))

    mac_js = json.loads(out)
    # iterate values only: the key would shadow the mac parameter
    # (original bug) and uses Python-2-only .iteritems()
    for info in mac_js.values():
        tmp_esi = info.get("esi", "")
        tmp_m_type = info.get("type", "")
        # the interface is only meaningful for local MACs
        tmp_intf = info.get("intf", "") if tmp_m_type == "local" else ""
        # fixed: original compared intf == intf (always true), so the
        # interface check never actually ran
        if tmp_esi == esi and tmp_m_type == m_type and tmp_intf == intf:
            return None

    return "invalid vni %d mac %s out %s" % (vni, mac, mac_js)
599 | ||
def test_evpn_mac():
    """
    1. Add a MAC on hostd11 and check that it is synced between torm11
       and torm12 and installed as a local MAC.
    2. Add a MAC on hostd21 and check that it is installed as a remote
       MAC on torm11 and torm12.
    """
    tgen = get_topogen()

    local_host = tgen.gears["hostd11"]
    remote_host = tgen.gears["hostd21"]
    rack1_pes = [tgen.gears["torm11"], tgen.gears["torm12"]]

    # ping the anycast gw from the local and remote hosts to populate
    # the mac address on the PEs
    local_host.run("arping -I torbond -c 1 45.0.0.1")
    remote_host.run("arping -I torbond -c 1 45.0.0.1")

    vni = 1000

    def expect_mac(src_host, m_type, intf):
        # poll each rack-1 PE until the host MAC shows up as expected
        _, mac = compute_host_ip_mac(src_host.name)
        esi = host_es_map.get(src_host.name)
        for pe in rack1_pes:
            checker = partial(check_mac, pe, vni, mac, m_type, esi, intf)
            _, result = topotest.run_and_expect(checker, None, count=20, wait=3)
            assert result is None, '"{}" {} MAC content incorrect'.format(
                pe.name, m_type
            )

    # rack-1 host MAC must be on all rack-1 PEs, pointing at the local
    # access port
    expect_mac(local_host, "local", "hostbond1")

    # rack-2 host MAC must be on all rack-1 PEs, pointing at the remote
    # ES destination
    expect_mac(remote_host, "remote", "")
648 | ||
if __name__ == "__main__":
    # allow running this test file directly; forward extra CLI args to pytest
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))