/*
 * BGP Multipath
 * Copyright (C) 2010 Google Inc.
 *
 * This file is part of Quagga
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
21 | ||
22 | #include <zebra.h> | |
23 | ||
24 | #include "command.h" | |
25 | #include "prefix.h" | |
26 | #include "linklist.h" | |
27 | #include "sockunion.h" | |
28 | #include "memory.h" | |
29 | #include "queue.h" | |
30 | #include "filter.h" | |
31 | ||
32 | #include "bgpd/bgpd.h" | |
33 | #include "bgpd/bgp_table.h" | |
34 | #include "bgpd/bgp_route.h" | |
35 | #include "bgpd/bgp_attr.h" | |
36 | #include "bgpd/bgp_debug.h" | |
37 | #include "bgpd/bgp_aspath.h" | |
38 | #include "bgpd/bgp_community.h" | |
39 | #include "bgpd/bgp_ecommunity.h" | |
40 | #include "bgpd/bgp_lcommunity.h" | |
41 | #include "bgpd/bgp_mpath.h" | |
42 | ||
43 | /* | |
44 | * bgp_maximum_paths_set | |
45 | * | |
46 | * Record maximum-paths configuration for BGP instance | |
47 | */ | |
48 | int bgp_maximum_paths_set(struct bgp *bgp, afi_t afi, safi_t safi, int peertype, | |
49 | uint16_t maxpaths, uint16_t options) | |
50 | { | |
51 | if (!bgp || (afi >= AFI_MAX) || (safi >= SAFI_MAX)) | |
52 | return -1; | |
53 | ||
54 | switch (peertype) { | |
55 | case BGP_PEER_IBGP: | |
56 | bgp->maxpaths[afi][safi].maxpaths_ibgp = maxpaths; | |
57 | bgp->maxpaths[afi][safi].ibgp_flags |= options; | |
58 | break; | |
59 | case BGP_PEER_EBGP: | |
60 | bgp->maxpaths[afi][safi].maxpaths_ebgp = maxpaths; | |
61 | break; | |
62 | default: | |
63 | return -1; | |
64 | } | |
65 | ||
66 | return 0; | |
67 | } | |
68 | ||
69 | /* | |
70 | * bgp_maximum_paths_unset | |
71 | * | |
72 | * Remove maximum-paths configuration from BGP instance | |
73 | */ | |
74 | int bgp_maximum_paths_unset(struct bgp *bgp, afi_t afi, safi_t safi, | |
75 | int peertype) | |
76 | { | |
77 | if (!bgp || (afi >= AFI_MAX) || (safi >= SAFI_MAX)) | |
78 | return -1; | |
79 | ||
80 | switch (peertype) { | |
81 | case BGP_PEER_IBGP: | |
82 | bgp->maxpaths[afi][safi].maxpaths_ibgp = multipath_num; | |
83 | bgp->maxpaths[afi][safi].ibgp_flags = 0; | |
84 | break; | |
85 | case BGP_PEER_EBGP: | |
86 | bgp->maxpaths[afi][safi].maxpaths_ebgp = multipath_num; | |
87 | break; | |
88 | default: | |
89 | return -1; | |
90 | } | |
91 | ||
92 | return 0; | |
93 | } | |
94 | ||
95 | /* | |
96 | * bgp_interface_same | |
97 | * | |
98 | * Return true if ifindex for ifp1 and ifp2 are the same, else return false. | |
99 | */ | |
100 | static int bgp_interface_same(struct interface *ifp1, struct interface *ifp2) | |
101 | { | |
102 | if (!ifp1 && !ifp2) | |
103 | return 1; | |
104 | ||
105 | if (!ifp1 && ifp2) | |
106 | return 0; | |
107 | ||
108 | if (ifp1 && !ifp2) | |
109 | return 0; | |
110 | ||
111 | return (ifp1->ifindex == ifp2->ifindex); | |
112 | } | |
113 | ||
114 | ||
/*
 * bgp_path_info_nexthop_cmp
 *
 * Compare the nexthops of two paths. Return value is less than, equal to,
 * or greater than zero if bi1 is respectively less than, equal to,
 * or greater than bi2.
 *
 * The IPv4 NEXT_HOP attribute is the primary key; ties are broken by the
 * MP_REACH nexthop, whose interpretation depends on its encoded length.
 */
int bgp_path_info_nexthop_cmp(struct bgp_path_info *bi1,
			      struct bgp_path_info *bi2)
{
	int compare;
	struct in6_addr addr1, addr2;

	/* Primary key: plain IPv4 nexthop. */
	compare = IPV4_ADDR_CMP(&bi1->attr->nexthop, &bi2->attr->nexthop);
	if (!compare) {
		if (bi1->attr->mp_nexthop_len == bi2->attr->mp_nexthop_len) {
			switch (bi1->attr->mp_nexthop_len) {
			case BGP_ATTR_NHLEN_IPV4:
			case BGP_ATTR_NHLEN_VPNV4:
				compare = IPV4_ADDR_CMP(
					&bi1->attr->mp_nexthop_global_in,
					&bi2->attr->mp_nexthop_global_in);
				break;
			case BGP_ATTR_NHLEN_IPV6_GLOBAL:
			case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
				compare = IPV6_ADDR_CMP(
					&bi1->attr->mp_nexthop_global,
					&bi2->attr->mp_nexthop_global);
				break;
			case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
				/* Compare whichever address each path prefers
				 * (global vs link-local). */
				addr1 = (bi1->attr->mp_nexthop_prefer_global)
						? bi1->attr->mp_nexthop_global
						: bi1->attr->mp_nexthop_local;
				addr2 = (bi2->attr->mp_nexthop_prefer_global)
						? bi2->attr->mp_nexthop_global
						: bi2->attr->mp_nexthop_local;

				/* When both use link-local nexthops they are
				 * only equal if learned on the same interface
				 * (link-local scope is per-interface). */
				if (!bi1->attr->mp_nexthop_prefer_global
				    && !bi2->attr->mp_nexthop_prefer_global)
					compare = !bgp_interface_same(
						bi1->peer->ifp, bi2->peer->ifp);

				if (!compare)
					compare = IPV6_ADDR_CMP(&addr1, &addr2);
				break;
			}
		}

		/* This can happen if one IPv6 peer sends you global and
		 * link-local
		 * nexthops but another IPv6 peer only sends you global:
		 * compare globals, then break the remaining tie on length.
		 */
		else if (bi1->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL
			 || bi1->attr->mp_nexthop_len
				    == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
			compare = IPV6_ADDR_CMP(&bi1->attr->mp_nexthop_global,
						&bi2->attr->mp_nexthop_global);
			if (!compare) {
				if (bi1->attr->mp_nexthop_len
				    < bi2->attr->mp_nexthop_len)
					compare = -1;
				else
					compare = 1;
			}
		}
	}

	return compare;
}
184 | ||
185 | /* | |
186 | * bgp_path_info_mpath_cmp | |
187 | * | |
188 | * This function determines our multipath list ordering. By ordering | |
189 | * the list we can deterministically select which paths are included | |
190 | * in the multipath set. The ordering also helps in detecting changes | |
191 | * in the multipath selection so we can detect whether to send an | |
192 | * update to zebra. | |
193 | * | |
194 | * The order of paths is determined first by received nexthop, and then | |
195 | * by peer address if the nexthops are the same. | |
196 | */ | |
197 | static int bgp_path_info_mpath_cmp(void *val1, void *val2) | |
198 | { | |
199 | struct bgp_path_info *bi1, *bi2; | |
200 | int compare; | |
201 | ||
202 | bi1 = val1; | |
203 | bi2 = val2; | |
204 | ||
205 | compare = bgp_path_info_nexthop_cmp(bi1, bi2); | |
206 | ||
207 | if (!compare) { | |
208 | if (!bi1->peer->su_remote && !bi2->peer->su_remote) | |
209 | compare = 0; | |
210 | else if (!bi1->peer->su_remote) | |
211 | compare = 1; | |
212 | else if (!bi2->peer->su_remote) | |
213 | compare = -1; | |
214 | else | |
215 | compare = sockunion_cmp(bi1->peer->su_remote, | |
216 | bi2->peer->su_remote); | |
217 | } | |
218 | ||
219 | return compare; | |
220 | } | |
221 | ||
222 | /* | |
223 | * bgp_mp_list_init | |
224 | * | |
225 | * Initialize the mp_list, which holds the list of multipaths | |
226 | * selected by bgp_best_selection | |
227 | */ | |
228 | void bgp_mp_list_init(struct list *mp_list) | |
229 | { | |
230 | assert(mp_list); | |
231 | memset(mp_list, 0, sizeof(struct list)); | |
232 | mp_list->cmp = bgp_path_info_mpath_cmp; | |
233 | } | |
234 | ||
235 | /* | |
236 | * bgp_mp_list_clear | |
237 | * | |
238 | * Clears all entries out of the mp_list | |
239 | */ | |
240 | void bgp_mp_list_clear(struct list *mp_list) | |
241 | { | |
242 | assert(mp_list); | |
243 | list_delete_all_node(mp_list); | |
244 | } | |
245 | ||
246 | /* | |
247 | * bgp_mp_list_add | |
248 | * | |
249 | * Adds a multipath entry to the mp_list | |
250 | */ | |
251 | void bgp_mp_list_add(struct list *mp_list, struct bgp_path_info *mpinfo) | |
252 | { | |
253 | assert(mp_list && mpinfo); | |
254 | listnode_add_sort(mp_list, mpinfo); | |
255 | } | |
256 | ||
257 | /* | |
258 | * bgp_path_info_mpath_new | |
259 | * | |
260 | * Allocate and zero memory for a new bgp_path_info_mpath element | |
261 | */ | |
262 | static struct bgp_path_info_mpath *bgp_path_info_mpath_new(void) | |
263 | { | |
264 | struct bgp_path_info_mpath *new_mpath; | |
265 | new_mpath = XCALLOC(MTYPE_BGP_MPATH_INFO, | |
266 | sizeof(struct bgp_path_info_mpath)); | |
267 | return new_mpath; | |
268 | } | |
269 | ||
270 | /* | |
271 | * bgp_path_info_mpath_free | |
272 | * | |
273 | * Release resources for a bgp_path_info_mpath element and zero out pointer | |
274 | */ | |
275 | void bgp_path_info_mpath_free(struct bgp_path_info_mpath **mpath) | |
276 | { | |
277 | if (mpath && *mpath) { | |
278 | if ((*mpath)->mp_attr) | |
279 | bgp_attr_unintern(&(*mpath)->mp_attr); | |
280 | XFREE(MTYPE_BGP_MPATH_INFO, *mpath); | |
281 | *mpath = NULL; | |
282 | } | |
283 | } | |
284 | ||
285 | /* | |
286 | * bgp_path_info_mpath_get | |
287 | * | |
288 | * Fetch the mpath element for the given bgp_path_info. Used for | |
289 | * doing lazy allocation. | |
290 | */ | |
291 | static struct bgp_path_info_mpath * | |
292 | bgp_path_info_mpath_get(struct bgp_path_info *path) | |
293 | { | |
294 | struct bgp_path_info_mpath *mpath; | |
295 | if (!path->mpath) { | |
296 | mpath = bgp_path_info_mpath_new(); | |
297 | if (!mpath) | |
298 | return NULL; | |
299 | path->mpath = mpath; | |
300 | mpath->mp_info = path; | |
301 | } | |
302 | return path->mpath; | |
303 | } | |
304 | ||
305 | /* | |
306 | * bgp_path_info_mpath_enqueue | |
307 | * | |
308 | * Enqueue a path onto the multipath list given the previous multipath | |
309 | * list entry | |
310 | */ | |
311 | static void bgp_path_info_mpath_enqueue(struct bgp_path_info *prev_info, | |
312 | struct bgp_path_info *path) | |
313 | { | |
314 | struct bgp_path_info_mpath *prev, *mpath; | |
315 | ||
316 | prev = bgp_path_info_mpath_get(prev_info); | |
317 | mpath = bgp_path_info_mpath_get(path); | |
318 | if (!prev || !mpath) | |
319 | return; | |
320 | ||
321 | mpath->mp_next = prev->mp_next; | |
322 | mpath->mp_prev = prev; | |
323 | if (prev->mp_next) | |
324 | prev->mp_next->mp_prev = mpath; | |
325 | prev->mp_next = mpath; | |
326 | ||
327 | SET_FLAG(path->flags, BGP_PATH_MULTIPATH); | |
328 | } | |
329 | ||
330 | /* | |
331 | * bgp_path_info_mpath_dequeue | |
332 | * | |
333 | * Remove a path from the multipath list | |
334 | */ | |
335 | void bgp_path_info_mpath_dequeue(struct bgp_path_info *path) | |
336 | { | |
337 | struct bgp_path_info_mpath *mpath = path->mpath; | |
338 | if (!mpath) | |
339 | return; | |
340 | if (mpath->mp_prev) | |
341 | mpath->mp_prev->mp_next = mpath->mp_next; | |
342 | if (mpath->mp_next) | |
343 | mpath->mp_next->mp_prev = mpath->mp_prev; | |
344 | mpath->mp_next = mpath->mp_prev = NULL; | |
345 | UNSET_FLAG(path->flags, BGP_PATH_MULTIPATH); | |
346 | } | |
347 | ||
348 | /* | |
349 | * bgp_path_info_mpath_next | |
350 | * | |
351 | * Given a bgp_path_info, return the next multipath entry | |
352 | */ | |
353 | struct bgp_path_info *bgp_path_info_mpath_next(struct bgp_path_info *path) | |
354 | { | |
355 | if (!path->mpath || !path->mpath->mp_next) | |
356 | return NULL; | |
357 | return path->mpath->mp_next->mp_info; | |
358 | } | |
359 | ||
/*
 * bgp_path_info_mpath_first
 *
 * Given bestpath bgp_path_info, return the first multipath entry.
 * The bestpath itself heads the multipath list, so its successor is
 * the first true multipath.
 */
struct bgp_path_info *bgp_path_info_mpath_first(struct bgp_path_info *path)
{
	return bgp_path_info_mpath_next(path);
}
369 | ||
370 | /* | |
371 | * bgp_path_info_mpath_count | |
372 | * | |
373 | * Given the bestpath bgp_path_info, return the number of multipath entries | |
374 | */ | |
375 | uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path) | |
376 | { | |
377 | if (!path->mpath) | |
378 | return 0; | |
379 | return path->mpath->mp_count; | |
380 | } | |
381 | ||
382 | /* | |
383 | * bgp_path_info_mpath_count_set | |
384 | * | |
385 | * Sets the count of multipaths into bestpath's mpath element | |
386 | */ | |
387 | static void bgp_path_info_mpath_count_set(struct bgp_path_info *path, | |
388 | uint32_t count) | |
389 | { | |
390 | struct bgp_path_info_mpath *mpath; | |
391 | if (!count && !path->mpath) | |
392 | return; | |
393 | mpath = bgp_path_info_mpath_get(path); | |
394 | if (!mpath) | |
395 | return; | |
396 | mpath->mp_count = count; | |
397 | } | |
398 | ||
399 | /* | |
400 | * bgp_path_info_mpath_attr | |
401 | * | |
402 | * Given bestpath bgp_path_info, return aggregated attribute set used | |
403 | * for advertising the multipath route | |
404 | */ | |
405 | struct attr *bgp_path_info_mpath_attr(struct bgp_path_info *path) | |
406 | { | |
407 | if (!path->mpath) | |
408 | return NULL; | |
409 | return path->mpath->mp_attr; | |
410 | } | |
411 | ||
412 | /* | |
413 | * bgp_path_info_mpath_attr_set | |
414 | * | |
415 | * Sets the aggregated attribute into bestpath's mpath element | |
416 | */ | |
417 | static void bgp_path_info_mpath_attr_set(struct bgp_path_info *path, | |
418 | struct attr *attr) | |
419 | { | |
420 | struct bgp_path_info_mpath *mpath; | |
421 | if (!attr && !path->mpath) | |
422 | return; | |
423 | mpath = bgp_path_info_mpath_get(path); | |
424 | if (!mpath) | |
425 | return; | |
426 | mpath->mp_attr = attr; | |
427 | } | |
428 | ||
429 | /* | |
430 | * bgp_path_info_mpath_update | |
431 | * | |
432 | * Compare and sync up the multipath list with the mp_list generated by | |
433 | * bgp_best_selection | |
434 | */ | |
435 | void bgp_path_info_mpath_update(struct bgp_node *rn, | |
436 | struct bgp_path_info *new_best, | |
437 | struct bgp_path_info *old_best, | |
438 | struct list *mp_list, | |
439 | struct bgp_maxpaths_cfg *mpath_cfg) | |
440 | { | |
441 | uint16_t maxpaths, mpath_count, old_mpath_count; | |
442 | struct listnode *mp_node, *mp_next_node; | |
443 | struct bgp_path_info *cur_mpath, *new_mpath, *next_mpath, *prev_mpath; | |
444 | int mpath_changed, debug; | |
445 | char pfx_buf[PREFIX2STR_BUFFER], nh_buf[2][INET6_ADDRSTRLEN]; | |
446 | char path_buf[PATH_ADDPATH_STR_BUFFER]; | |
447 | ||
448 | mpath_changed = 0; | |
449 | maxpaths = multipath_num; | |
450 | mpath_count = 0; | |
451 | cur_mpath = NULL; | |
452 | old_mpath_count = 0; | |
453 | prev_mpath = new_best; | |
454 | mp_node = listhead(mp_list); | |
455 | debug = bgp_debug_bestpath(&rn->p); | |
456 | ||
457 | if (debug) | |
458 | prefix2str(&rn->p, pfx_buf, sizeof(pfx_buf)); | |
459 | ||
460 | if (new_best) { | |
461 | mpath_count++; | |
462 | if (new_best != old_best) | |
463 | bgp_path_info_mpath_dequeue(new_best); | |
464 | maxpaths = (new_best->peer->sort == BGP_PEER_IBGP) | |
465 | ? mpath_cfg->maxpaths_ibgp | |
466 | : mpath_cfg->maxpaths_ebgp; | |
467 | } | |
468 | ||
469 | if (old_best) { | |
470 | cur_mpath = bgp_path_info_mpath_first(old_best); | |
471 | old_mpath_count = bgp_path_info_mpath_count(old_best); | |
472 | bgp_path_info_mpath_count_set(old_best, 0); | |
473 | bgp_path_info_mpath_dequeue(old_best); | |
474 | } | |
475 | ||
476 | if (debug) | |
477 | zlog_debug( | |
478 | "%s: starting mpath update, newbest %s num candidates %d old-mpath-count %d", | |
479 | pfx_buf, new_best ? new_best->peer->host : "NONE", | |
480 | mp_list ? listcount(mp_list) : 0, old_mpath_count); | |
481 | ||
482 | /* | |
483 | * We perform an ordered walk through both lists in parallel. | |
484 | * The reason for the ordered walk is that if there are paths | |
485 | * that were previously multipaths and are still multipaths, the walk | |
486 | * should encounter them in both lists at the same time. Otherwise | |
487 | * there will be paths that are in one list or another, and we | |
488 | * will deal with these separately. | |
489 | * | |
490 | * Note that new_best might be somewhere in the mp_list, so we need | |
491 | * to skip over it | |
492 | */ | |
493 | while (mp_node || cur_mpath) { | |
494 | struct bgp_path_info *tmp_info; | |
495 | ||
496 | /* | |
497 | * We can bail out of this loop if all existing paths on the | |
498 | * multipath list have been visited (for cleanup purposes) and | |
499 | * the maxpath requirement is fulfulled | |
500 | */ | |
501 | if (!cur_mpath && (mpath_count >= maxpaths)) | |
502 | break; | |
503 | ||
504 | mp_next_node = mp_node ? listnextnode(mp_node) : NULL; | |
505 | next_mpath = | |
506 | cur_mpath ? bgp_path_info_mpath_next(cur_mpath) : NULL; | |
507 | tmp_info = mp_node ? listgetdata(mp_node) : NULL; | |
508 | ||
509 | if (debug) | |
510 | zlog_debug( | |
511 | "%s: comparing candidate %s with existing mpath %s", | |
512 | pfx_buf, | |
513 | tmp_info ? tmp_info->peer->host : "NONE", | |
514 | cur_mpath ? cur_mpath->peer->host : "NONE"); | |
515 | ||
516 | /* | |
517 | * If equal, the path was a multipath and is still a multipath. | |
518 | * Insert onto new multipath list if maxpaths allows. | |
519 | */ | |
520 | if (mp_node && (listgetdata(mp_node) == cur_mpath)) { | |
521 | list_delete_node(mp_list, mp_node); | |
522 | bgp_path_info_mpath_dequeue(cur_mpath); | |
523 | if ((mpath_count < maxpaths) | |
524 | && bgp_path_info_nexthop_cmp(prev_mpath, | |
525 | cur_mpath)) { | |
526 | bgp_path_info_mpath_enqueue(prev_mpath, | |
527 | cur_mpath); | |
528 | prev_mpath = cur_mpath; | |
529 | mpath_count++; | |
530 | if (debug) { | |
531 | bgp_path_info_path_with_addpath_rx_str( | |
532 | cur_mpath, path_buf); | |
533 | zlog_debug( | |
534 | "%s: %s is still multipath, cur count %d", | |
535 | pfx_buf, path_buf, mpath_count); | |
536 | } | |
537 | } else { | |
538 | mpath_changed = 1; | |
539 | if (debug) { | |
540 | bgp_path_info_path_with_addpath_rx_str( | |
541 | cur_mpath, path_buf); | |
542 | zlog_debug( | |
543 | "%s: remove mpath %s nexthop %s, cur count %d", | |
544 | pfx_buf, path_buf, | |
545 | inet_ntop(AF_INET, | |
546 | &cur_mpath->attr | |
547 | ->nexthop, | |
548 | nh_buf[0], | |
549 | sizeof(nh_buf[0])), | |
550 | mpath_count); | |
551 | } | |
552 | } | |
553 | mp_node = mp_next_node; | |
554 | cur_mpath = next_mpath; | |
555 | continue; | |
556 | } | |
557 | ||
558 | if (cur_mpath | |
559 | && (!mp_node | |
560 | || (bgp_path_info_mpath_cmp(cur_mpath, | |
561 | listgetdata(mp_node)) | |
562 | < 0))) { | |
563 | /* | |
564 | * If here, we have an old multipath and either the | |
565 | * mp_list | |
566 | * is finished or the next mp_node points to a later | |
567 | * multipath, so we need to purge this path from the | |
568 | * multipath list | |
569 | */ | |
570 | bgp_path_info_mpath_dequeue(cur_mpath); | |
571 | mpath_changed = 1; | |
572 | if (debug) { | |
573 | bgp_path_info_path_with_addpath_rx_str( | |
574 | cur_mpath, path_buf); | |
575 | zlog_debug( | |
576 | "%s: remove mpath %s nexthop %s, cur count %d", | |
577 | pfx_buf, path_buf, | |
578 | inet_ntop(AF_INET, | |
579 | &cur_mpath->attr->nexthop, | |
580 | nh_buf[0], sizeof(nh_buf[0])), | |
581 | mpath_count); | |
582 | } | |
583 | cur_mpath = next_mpath; | |
584 | } else { | |
585 | /* | |
586 | * If here, we have a path on the mp_list that was not | |
587 | * previously | |
588 | * a multipath (due to non-equivalance or maxpaths | |
589 | * exceeded), | |
590 | * or the matching multipath is sorted later in the | |
591 | * multipath | |
592 | * list. Before we enqueue the path on the new multipath | |
593 | * list, | |
594 | * make sure its not on the old_best multipath list or | |
595 | * referenced | |
596 | * via next_mpath: | |
597 | * - If next_mpath points to this new path, update | |
598 | * next_mpath to | |
599 | * point to the multipath after this one | |
600 | * - Dequeue the path from the multipath list just to | |
601 | * make sure | |
602 | */ | |
603 | new_mpath = listgetdata(mp_node); | |
604 | list_delete_node(mp_list, mp_node); | |
605 | assert(new_mpath); | |
606 | assert(prev_mpath); | |
607 | if ((mpath_count < maxpaths) && (new_mpath != new_best) | |
608 | && bgp_path_info_nexthop_cmp(prev_mpath, | |
609 | new_mpath)) { | |
610 | if (new_mpath == next_mpath) | |
611 | bgp_path_info_mpath_next(new_mpath); | |
612 | bgp_path_info_mpath_dequeue(new_mpath); | |
613 | ||
614 | bgp_path_info_mpath_enqueue(prev_mpath, | |
615 | new_mpath); | |
616 | prev_mpath = new_mpath; | |
617 | mpath_changed = 1; | |
618 | mpath_count++; | |
619 | if (debug) { | |
620 | bgp_path_info_path_with_addpath_rx_str( | |
621 | new_mpath, path_buf); | |
622 | zlog_debug( | |
623 | "%s: add mpath %s nexthop %s, cur count %d", | |
624 | pfx_buf, path_buf, | |
625 | inet_ntop(AF_INET, | |
626 | &new_mpath->attr | |
627 | ->nexthop, | |
628 | nh_buf[0], | |
629 | sizeof(nh_buf[0])), | |
630 | mpath_count); | |
631 | } | |
632 | } | |
633 | mp_node = mp_next_node; | |
634 | } | |
635 | } | |
636 | ||
637 | if (new_best) { | |
638 | if (debug) | |
639 | zlog_debug( | |
640 | "%s: New mpath count (incl newbest) %d mpath-change %s", | |
641 | pfx_buf, mpath_count, | |
642 | mpath_changed ? "YES" : "NO"); | |
643 | ||
644 | bgp_path_info_mpath_count_set(new_best, mpath_count - 1); | |
645 | if (mpath_changed | |
646 | || (bgp_path_info_mpath_count(new_best) != old_mpath_count)) | |
647 | SET_FLAG(new_best->flags, BGP_PATH_MULTIPATH_CHG); | |
648 | } | |
649 | } | |
650 | ||
651 | /* | |
652 | * bgp_mp_dmed_deselect | |
653 | * | |
654 | * Clean up multipath information for BGP_PATH_DMED_SELECTED path that | |
655 | * is not selected as best path | |
656 | */ | |
657 | void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best) | |
658 | { | |
659 | struct bgp_path_info *mpinfo, *mpnext; | |
660 | ||
661 | if (!dmed_best) | |
662 | return; | |
663 | ||
664 | for (mpinfo = bgp_path_info_mpath_first(dmed_best); mpinfo; | |
665 | mpinfo = mpnext) { | |
666 | mpnext = bgp_path_info_mpath_next(mpinfo); | |
667 | bgp_path_info_mpath_dequeue(mpinfo); | |
668 | } | |
669 | ||
670 | bgp_path_info_mpath_count_set(dmed_best, 0); | |
671 | UNSET_FLAG(dmed_best->flags, BGP_PATH_MULTIPATH_CHG); | |
672 | assert(bgp_path_info_mpath_first(dmed_best) == 0); | |
673 | } | |
674 | ||
/*
 * bgp_path_info_mpath_aggregate_update
 *
 * Set the multipath aggregate attribute. We need to see if the
 * aggregate has changed and then set the ATTR_CHANGED flag on the
 * bestpath info so that a peer update will be generated. The
 * change is detected by generating the current attribute,
 * interning it, and then comparing the interned pointer with the
 * current value. We can skip this generate/compare step if there
 * is no change in multipath selection and no attribute change in
 * any multipath.
 */
void bgp_path_info_mpath_aggregate_update(struct bgp_path_info *new_best,
					  struct bgp_path_info *old_best)
{
	struct bgp_path_info *mpinfo;
	struct aspath *aspath;
	struct aspath *asmerge;
	struct attr *new_attr, *old_attr;
	uint8_t origin;
	struct community *community, *commerge;
	struct ecommunity *ecomm, *ecommerge;
	struct lcommunity *lcomm, *lcommerge;
	struct attr attr = {0};

	/* Bestpath changed: release the aggregate held by the old best. */
	if (old_best && (old_best != new_best)
	    && (old_attr = bgp_path_info_mpath_attr(old_best))) {
		bgp_attr_unintern(&old_attr);
		bgp_path_info_mpath_attr_set(old_best, NULL);
	}

	if (!new_best)
		return;

	/* No multipaths: drop any stale aggregate and flag the change. */
	if (!bgp_path_info_mpath_count(new_best)) {
		if ((new_attr = bgp_path_info_mpath_attr(new_best))) {
			bgp_attr_unintern(&new_attr);
			bgp_path_info_mpath_attr_set(new_best, NULL);
			SET_FLAG(new_best->flags, BGP_PATH_ATTR_CHANGED);
		}
		return;
	}

	/* Start from the bestpath's own attributes. */
	bgp_attr_dup(&attr, new_best->attr);

	if (new_best->peer && bgp_flag_check(new_best->peer->bgp,
					     BGP_FLAG_MULTIPATH_RELAX_AS_SET)) {

		/* aggregate attribute from multipath constituents */
		aspath = aspath_dup(attr.aspath);
		origin = attr.origin;
		community =
			attr.community ? community_dup(attr.community) : NULL;
		ecomm = (attr.ecommunity) ? ecommunity_dup(attr.ecommunity)
					  : NULL;
		lcomm = (attr.lcommunity) ? lcommunity_dup(attr.lcommunity)
					  : NULL;

		for (mpinfo = bgp_path_info_mpath_first(new_best); mpinfo;
		     mpinfo = bgp_path_info_mpath_next(mpinfo)) {
			/* Fold each constituent's AS path into an aggregate
			 * (AS_SET for the differing portions). */
			asmerge =
				aspath_aggregate(aspath, mpinfo->attr->aspath);
			aspath_free(aspath);
			aspath = asmerge;

			/* Keep the numerically highest (least preferred)
			 * origin among the constituents. */
			if (origin < mpinfo->attr->origin)
				origin = mpinfo->attr->origin;

			/* Union + dedupe each community container. */
			if (mpinfo->attr->community) {
				if (community) {
					commerge = community_merge(
						community,
						mpinfo->attr->community);
					community =
						community_uniq_sort(commerge);
					community_free(commerge);
				} else
					community = community_dup(
						mpinfo->attr->community);
			}

			if (mpinfo->attr->ecommunity) {
				if (ecomm) {
					ecommerge = ecommunity_merge(
						ecomm,
						mpinfo->attr->ecommunity);
					ecomm = ecommunity_uniq_sort(ecommerge);
					ecommunity_free(&ecommerge);
				} else
					ecomm = ecommunity_dup(
						mpinfo->attr->ecommunity);
			}
			if (mpinfo->attr->lcommunity) {
				if (lcomm) {
					lcommerge = lcommunity_merge(
						lcomm,
						mpinfo->attr->lcommunity);
					lcomm = lcommunity_uniq_sort(lcommerge);
					lcommunity_free(&lcommerge);
				} else
					lcomm = lcommunity_dup(
						mpinfo->attr->lcommunity);
			}
		}

		attr.aspath = aspath;
		attr.origin = origin;
		if (community) {
			attr.community = community;
			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);
		}
		if (ecomm) {
			attr.ecommunity = ecomm;
			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
		}
		if (lcomm) {
			attr.lcommunity = lcomm;
			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES);
		}

		/* Zap multipath attr nexthop so we set nexthop to self */
		attr.nexthop.s_addr = 0;
		memset(&attr.mp_nexthop_global, 0, sizeof(struct in6_addr));

		/* TODO: should we set ATOMIC_AGGREGATE and AGGREGATOR? */
	}

	/* Intern and compare pointers to detect an attribute change. */
	new_attr = bgp_attr_intern(&attr);

	if (new_attr != bgp_path_info_mpath_attr(new_best)) {
		if ((old_attr = bgp_path_info_mpath_attr(new_best)))
			bgp_attr_unintern(&old_attr);
		bgp_path_info_mpath_attr_set(new_best, new_attr);
		SET_FLAG(new_best->flags, BGP_PATH_ATTR_CHANGED);
	} else
		bgp_attr_unintern(&new_attr);
}