]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * BGP Multipath | |
3 | * Copyright (C) 2010 Google Inc. | |
4 | * | |
5 | * This file is part of Quagga | |
6 | * | |
7 | * Quagga is free software; you can redistribute it and/or modify it | |
8 | * under the terms of the GNU General Public License as published by the | |
9 | * Free Software Foundation; either version 2, or (at your option) any | |
10 | * later version. | |
11 | * | |
12 | * Quagga is distributed in the hope that it will be useful, but | |
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along | |
18 | * with this program; see the file COPYING; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
20 | */ | |
21 | ||
22 | #include <zebra.h> | |
23 | ||
24 | #include "command.h" | |
25 | #include "prefix.h" | |
26 | #include "linklist.h" | |
27 | #include "sockunion.h" | |
28 | #include "memory.h" | |
29 | #include "queue.h" | |
30 | #include "filter.h" | |
31 | ||
32 | #include "bgpd/bgpd.h" | |
33 | #include "bgpd/bgp_table.h" | |
34 | #include "bgpd/bgp_route.h" | |
35 | #include "bgpd/bgp_attr.h" | |
36 | #include "bgpd/bgp_debug.h" | |
37 | #include "bgpd/bgp_aspath.h" | |
38 | #include "bgpd/bgp_community.h" | |
39 | #include "bgpd/bgp_ecommunity.h" | |
40 | #include "bgpd/bgp_lcommunity.h" | |
41 | #include "bgpd/bgp_mpath.h" | |
42 | ||
43 | /* | |
44 | * bgp_maximum_paths_set | |
45 | * | |
46 | * Record maximum-paths configuration for BGP instance | |
47 | */ | |
48 | int bgp_maximum_paths_set(struct bgp *bgp, afi_t afi, safi_t safi, int peertype, | |
49 | uint16_t maxpaths, uint16_t options) | |
50 | { | |
51 | if (!bgp || (afi >= AFI_MAX) || (safi >= SAFI_MAX)) | |
52 | return -1; | |
53 | ||
54 | switch (peertype) { | |
55 | case BGP_PEER_IBGP: | |
56 | bgp->maxpaths[afi][safi].maxpaths_ibgp = maxpaths; | |
57 | bgp->maxpaths[afi][safi].ibgp_flags |= options; | |
58 | break; | |
59 | case BGP_PEER_EBGP: | |
60 | bgp->maxpaths[afi][safi].maxpaths_ebgp = maxpaths; | |
61 | break; | |
62 | default: | |
63 | return -1; | |
64 | } | |
65 | ||
66 | return 0; | |
67 | } | |
68 | ||
69 | /* | |
70 | * bgp_maximum_paths_unset | |
71 | * | |
72 | * Remove maximum-paths configuration from BGP instance | |
73 | */ | |
74 | int bgp_maximum_paths_unset(struct bgp *bgp, afi_t afi, safi_t safi, | |
75 | int peertype) | |
76 | { | |
77 | if (!bgp || (afi >= AFI_MAX) || (safi >= SAFI_MAX)) | |
78 | return -1; | |
79 | ||
80 | switch (peertype) { | |
81 | case BGP_PEER_IBGP: | |
82 | bgp->maxpaths[afi][safi].maxpaths_ibgp = multipath_num; | |
83 | bgp->maxpaths[afi][safi].ibgp_flags = 0; | |
84 | break; | |
85 | case BGP_PEER_EBGP: | |
86 | bgp->maxpaths[afi][safi].maxpaths_ebgp = multipath_num; | |
87 | break; | |
88 | default: | |
89 | return -1; | |
90 | } | |
91 | ||
92 | return 0; | |
93 | } | |
94 | ||
95 | /* | |
96 | * bgp_interface_same | |
97 | * | |
98 | * Return true if ifindex for ifp1 and ifp2 are the same, else return false. | |
99 | */ | |
100 | static int bgp_interface_same(struct interface *ifp1, struct interface *ifp2) | |
101 | { | |
102 | if (!ifp1 && !ifp2) | |
103 | return 1; | |
104 | ||
105 | if (!ifp1 && ifp2) | |
106 | return 0; | |
107 | ||
108 | if (ifp1 && !ifp2) | |
109 | return 0; | |
110 | ||
111 | return (ifp1->ifindex == ifp2->ifindex); | |
112 | } | |
113 | ||
114 | ||
/*
 * bgp_path_info_nexthop_cmp
 *
 * Compare the nexthops of two paths. Return value is less than, equal to,
 * or greater than zero if bpi1 is respectively less than, equal to,
 * or greater than bpi2.
 *
 * Both the IPv4 NEXT_HOP attribute and, when the MP nexthop lengths
 * allow, the MP_REACH nexthop(s) are compared; two paths compare equal
 * only when every relevant nexthop component matches.
 */
int bgp_path_info_nexthop_cmp(struct bgp_path_info *bpi1,
			      struct bgp_path_info *bpi2)
{
	int compare;
	struct in6_addr addr1, addr2;

	/* Primary key: the IPv4 NEXT_HOP attribute. */
	compare = IPV4_ADDR_CMP(&bpi1->attr->nexthop, &bpi2->attr->nexthop);
	if (!compare) {
		if (bpi1->attr->mp_nexthop_len == bpi2->attr->mp_nexthop_len) {
			switch (bpi1->attr->mp_nexthop_len) {
			case BGP_ATTR_NHLEN_IPV4:
			case BGP_ATTR_NHLEN_VPNV4:
				compare = IPV4_ADDR_CMP(
					&bpi1->attr->mp_nexthop_global_in,
					&bpi2->attr->mp_nexthop_global_in);
				break;
			case BGP_ATTR_NHLEN_IPV6_GLOBAL:
			case BGP_ATTR_NHLEN_VPNV6_GLOBAL:
				compare = IPV6_ADDR_CMP(
					&bpi1->attr->mp_nexthop_global,
					&bpi2->attr->mp_nexthop_global);
				break;
			case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL:
				/* Pick whichever of global/link-local each
				 * side prefers, then compare those. */
				addr1 = (bpi1->attr->mp_nexthop_prefer_global)
						? bpi1->attr->mp_nexthop_global
						: bpi1->attr->mp_nexthop_local;
				addr2 = (bpi2->attr->mp_nexthop_prefer_global)
						? bpi2->attr->mp_nexthop_global
						: bpi2->attr->mp_nexthop_local;

				/* Link-local nexthops are only comparable
				 * when both were learned on the same
				 * interface; a differing interface makes the
				 * paths unequal regardless of address. */
				if (!bpi1->attr->mp_nexthop_prefer_global
				    && !bpi2->attr->mp_nexthop_prefer_global)
					compare = !bgp_interface_same(
						bpi1->peer->ifp,
						bpi2->peer->ifp);

				if (!compare)
					compare = IPV6_ADDR_CMP(&addr1, &addr2);
				break;
			/* other nexthop lengths: compare stays 0 (equal) */
			}
		}

		/* This can happen if one IPv6 peer sends you global and
		 * link-local
		 * nexthops but another IPv6 peer only sends you global
		 */
		else if (bpi1->attr->mp_nexthop_len
				 == BGP_ATTR_NHLEN_IPV6_GLOBAL
			 || bpi1->attr->mp_nexthop_len
				    == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
			compare = IPV6_ADDR_CMP(&bpi1->attr->mp_nexthop_global,
						&bpi2->attr->mp_nexthop_global);
			if (!compare) {
				/* Same global address: order deterministically
				 * by encoding length (shorter sorts first). */
				if (bpi1->attr->mp_nexthop_len
				    < bpi2->attr->mp_nexthop_len)
					compare = -1;
				else
					compare = 1;
			}
		}
	}

	return compare;
}
186 | ||
187 | /* | |
188 | * bgp_path_info_mpath_cmp | |
189 | * | |
190 | * This function determines our multipath list ordering. By ordering | |
191 | * the list we can deterministically select which paths are included | |
192 | * in the multipath set. The ordering also helps in detecting changes | |
193 | * in the multipath selection so we can detect whether to send an | |
194 | * update to zebra. | |
195 | * | |
196 | * The order of paths is determined first by received nexthop, and then | |
197 | * by peer address if the nexthops are the same. | |
198 | */ | |
199 | static int bgp_path_info_mpath_cmp(void *val1, void *val2) | |
200 | { | |
201 | struct bgp_path_info *bpi1, *bpi2; | |
202 | int compare; | |
203 | ||
204 | bpi1 = val1; | |
205 | bpi2 = val2; | |
206 | ||
207 | compare = bgp_path_info_nexthop_cmp(bpi1, bpi2); | |
208 | ||
209 | if (!compare) { | |
210 | if (!bpi1->peer->su_remote && !bpi2->peer->su_remote) | |
211 | compare = 0; | |
212 | else if (!bpi1->peer->su_remote) | |
213 | compare = 1; | |
214 | else if (!bpi2->peer->su_remote) | |
215 | compare = -1; | |
216 | else | |
217 | compare = sockunion_cmp(bpi1->peer->su_remote, | |
218 | bpi2->peer->su_remote); | |
219 | } | |
220 | ||
221 | return compare; | |
222 | } | |
223 | ||
224 | /* | |
225 | * bgp_mp_list_init | |
226 | * | |
227 | * Initialize the mp_list, which holds the list of multipaths | |
228 | * selected by bgp_best_selection | |
229 | */ | |
230 | void bgp_mp_list_init(struct list *mp_list) | |
231 | { | |
232 | assert(mp_list); | |
233 | memset(mp_list, 0, sizeof(struct list)); | |
234 | mp_list->cmp = bgp_path_info_mpath_cmp; | |
235 | } | |
236 | ||
237 | /* | |
238 | * bgp_mp_list_clear | |
239 | * | |
240 | * Clears all entries out of the mp_list | |
241 | */ | |
242 | void bgp_mp_list_clear(struct list *mp_list) | |
243 | { | |
244 | assert(mp_list); | |
245 | list_delete_all_node(mp_list); | |
246 | } | |
247 | ||
248 | /* | |
249 | * bgp_mp_list_add | |
250 | * | |
251 | * Adds a multipath entry to the mp_list | |
252 | */ | |
253 | void bgp_mp_list_add(struct list *mp_list, struct bgp_path_info *mpinfo) | |
254 | { | |
255 | assert(mp_list && mpinfo); | |
256 | listnode_add_sort(mp_list, mpinfo); | |
257 | } | |
258 | ||
259 | /* | |
260 | * bgp_path_info_mpath_new | |
261 | * | |
262 | * Allocate and zero memory for a new bgp_path_info_mpath element | |
263 | */ | |
264 | static struct bgp_path_info_mpath *bgp_path_info_mpath_new(void) | |
265 | { | |
266 | struct bgp_path_info_mpath *new_mpath; | |
267 | new_mpath = XCALLOC(MTYPE_BGP_MPATH_INFO, | |
268 | sizeof(struct bgp_path_info_mpath)); | |
269 | return new_mpath; | |
270 | } | |
271 | ||
272 | /* | |
273 | * bgp_path_info_mpath_free | |
274 | * | |
275 | * Release resources for a bgp_path_info_mpath element and zero out pointer | |
276 | */ | |
277 | void bgp_path_info_mpath_free(struct bgp_path_info_mpath **mpath) | |
278 | { | |
279 | if (mpath && *mpath) { | |
280 | if ((*mpath)->mp_attr) | |
281 | bgp_attr_unintern(&(*mpath)->mp_attr); | |
282 | XFREE(MTYPE_BGP_MPATH_INFO, *mpath); | |
283 | *mpath = NULL; | |
284 | } | |
285 | } | |
286 | ||
287 | /* | |
288 | * bgp_path_info_mpath_get | |
289 | * | |
290 | * Fetch the mpath element for the given bgp_path_info. Used for | |
291 | * doing lazy allocation. | |
292 | */ | |
293 | static struct bgp_path_info_mpath * | |
294 | bgp_path_info_mpath_get(struct bgp_path_info *path) | |
295 | { | |
296 | struct bgp_path_info_mpath *mpath; | |
297 | if (!path->mpath) { | |
298 | mpath = bgp_path_info_mpath_new(); | |
299 | if (!mpath) | |
300 | return NULL; | |
301 | path->mpath = mpath; | |
302 | mpath->mp_info = path; | |
303 | } | |
304 | return path->mpath; | |
305 | } | |
306 | ||
307 | /* | |
308 | * bgp_path_info_mpath_enqueue | |
309 | * | |
310 | * Enqueue a path onto the multipath list given the previous multipath | |
311 | * list entry | |
312 | */ | |
313 | static void bgp_path_info_mpath_enqueue(struct bgp_path_info *prev_info, | |
314 | struct bgp_path_info *path) | |
315 | { | |
316 | struct bgp_path_info_mpath *prev, *mpath; | |
317 | ||
318 | prev = bgp_path_info_mpath_get(prev_info); | |
319 | mpath = bgp_path_info_mpath_get(path); | |
320 | if (!prev || !mpath) | |
321 | return; | |
322 | ||
323 | mpath->mp_next = prev->mp_next; | |
324 | mpath->mp_prev = prev; | |
325 | if (prev->mp_next) | |
326 | prev->mp_next->mp_prev = mpath; | |
327 | prev->mp_next = mpath; | |
328 | ||
329 | SET_FLAG(path->flags, BGP_PATH_MULTIPATH); | |
330 | } | |
331 | ||
332 | /* | |
333 | * bgp_path_info_mpath_dequeue | |
334 | * | |
335 | * Remove a path from the multipath list | |
336 | */ | |
337 | void bgp_path_info_mpath_dequeue(struct bgp_path_info *path) | |
338 | { | |
339 | struct bgp_path_info_mpath *mpath = path->mpath; | |
340 | if (!mpath) | |
341 | return; | |
342 | if (mpath->mp_prev) | |
343 | mpath->mp_prev->mp_next = mpath->mp_next; | |
344 | if (mpath->mp_next) | |
345 | mpath->mp_next->mp_prev = mpath->mp_prev; | |
346 | mpath->mp_next = mpath->mp_prev = NULL; | |
347 | UNSET_FLAG(path->flags, BGP_PATH_MULTIPATH); | |
348 | } | |
349 | ||
350 | /* | |
351 | * bgp_path_info_mpath_next | |
352 | * | |
353 | * Given a bgp_path_info, return the next multipath entry | |
354 | */ | |
355 | struct bgp_path_info *bgp_path_info_mpath_next(struct bgp_path_info *path) | |
356 | { | |
357 | if (!path->mpath || !path->mpath->mp_next) | |
358 | return NULL; | |
359 | return path->mpath->mp_next->mp_info; | |
360 | } | |
361 | ||
/*
 * bgp_path_info_mpath_first
 *
 * Given bestpath bgp_path_info, return the first multipath entry.
 * The bestpath itself is not on the list, so the first multipath is
 * simply its successor.
 */
struct bgp_path_info *bgp_path_info_mpath_first(struct bgp_path_info *path)
{
	return bgp_path_info_mpath_next(path);
}
371 | ||
372 | /* | |
373 | * bgp_path_info_mpath_count | |
374 | * | |
375 | * Given the bestpath bgp_path_info, return the number of multipath entries | |
376 | */ | |
377 | uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path) | |
378 | { | |
379 | if (!path->mpath) | |
380 | return 0; | |
381 | return path->mpath->mp_count; | |
382 | } | |
383 | ||
384 | /* | |
385 | * bgp_path_info_mpath_count_set | |
386 | * | |
387 | * Sets the count of multipaths into bestpath's mpath element | |
388 | */ | |
389 | static void bgp_path_info_mpath_count_set(struct bgp_path_info *path, | |
390 | uint32_t count) | |
391 | { | |
392 | struct bgp_path_info_mpath *mpath; | |
393 | if (!count && !path->mpath) | |
394 | return; | |
395 | mpath = bgp_path_info_mpath_get(path); | |
396 | if (!mpath) | |
397 | return; | |
398 | mpath->mp_count = count; | |
399 | } | |
400 | ||
401 | /* | |
402 | * bgp_path_info_mpath_attr | |
403 | * | |
404 | * Given bestpath bgp_path_info, return aggregated attribute set used | |
405 | * for advertising the multipath route | |
406 | */ | |
407 | struct attr *bgp_path_info_mpath_attr(struct bgp_path_info *path) | |
408 | { | |
409 | if (!path->mpath) | |
410 | return NULL; | |
411 | return path->mpath->mp_attr; | |
412 | } | |
413 | ||
414 | /* | |
415 | * bgp_path_info_mpath_attr_set | |
416 | * | |
417 | * Sets the aggregated attribute into bestpath's mpath element | |
418 | */ | |
419 | static void bgp_path_info_mpath_attr_set(struct bgp_path_info *path, | |
420 | struct attr *attr) | |
421 | { | |
422 | struct bgp_path_info_mpath *mpath; | |
423 | if (!attr && !path->mpath) | |
424 | return; | |
425 | mpath = bgp_path_info_mpath_get(path); | |
426 | if (!mpath) | |
427 | return; | |
428 | mpath->mp_attr = attr; | |
429 | } | |
430 | ||
431 | /* | |
432 | * bgp_path_info_mpath_update | |
433 | * | |
434 | * Compare and sync up the multipath list with the mp_list generated by | |
435 | * bgp_best_selection | |
436 | */ | |
437 | void bgp_path_info_mpath_update(struct bgp_node *rn, | |
438 | struct bgp_path_info *new_best, | |
439 | struct bgp_path_info *old_best, | |
440 | struct list *mp_list, | |
441 | struct bgp_maxpaths_cfg *mpath_cfg) | |
442 | { | |
443 | uint16_t maxpaths, mpath_count, old_mpath_count; | |
444 | struct listnode *mp_node, *mp_next_node; | |
445 | struct bgp_path_info *cur_mpath, *new_mpath, *next_mpath, *prev_mpath; | |
446 | int mpath_changed, debug; | |
447 | char pfx_buf[PREFIX2STR_BUFFER], nh_buf[2][INET6_ADDRSTRLEN]; | |
448 | char path_buf[PATH_ADDPATH_STR_BUFFER]; | |
449 | ||
450 | mpath_changed = 0; | |
451 | maxpaths = multipath_num; | |
452 | mpath_count = 0; | |
453 | cur_mpath = NULL; | |
454 | old_mpath_count = 0; | |
455 | prev_mpath = new_best; | |
456 | mp_node = listhead(mp_list); | |
457 | debug = bgp_debug_bestpath(&rn->p); | |
458 | ||
459 | if (debug) | |
460 | prefix2str(&rn->p, pfx_buf, sizeof(pfx_buf)); | |
461 | ||
462 | if (new_best) { | |
463 | mpath_count++; | |
464 | if (new_best != old_best) | |
465 | bgp_path_info_mpath_dequeue(new_best); | |
466 | maxpaths = (new_best->peer->sort == BGP_PEER_IBGP) | |
467 | ? mpath_cfg->maxpaths_ibgp | |
468 | : mpath_cfg->maxpaths_ebgp; | |
469 | } | |
470 | ||
471 | if (old_best) { | |
472 | cur_mpath = bgp_path_info_mpath_first(old_best); | |
473 | old_mpath_count = bgp_path_info_mpath_count(old_best); | |
474 | bgp_path_info_mpath_count_set(old_best, 0); | |
475 | bgp_path_info_mpath_dequeue(old_best); | |
476 | } | |
477 | ||
478 | if (debug) | |
479 | zlog_debug( | |
480 | "%s: starting mpath update, newbest %s num candidates %d old-mpath-count %d", | |
481 | pfx_buf, new_best ? new_best->peer->host : "NONE", | |
482 | mp_list ? listcount(mp_list) : 0, old_mpath_count); | |
483 | ||
484 | /* | |
485 | * We perform an ordered walk through both lists in parallel. | |
486 | * The reason for the ordered walk is that if there are paths | |
487 | * that were previously multipaths and are still multipaths, the walk | |
488 | * should encounter them in both lists at the same time. Otherwise | |
489 | * there will be paths that are in one list or another, and we | |
490 | * will deal with these separately. | |
491 | * | |
492 | * Note that new_best might be somewhere in the mp_list, so we need | |
493 | * to skip over it | |
494 | */ | |
495 | while (mp_node || cur_mpath) { | |
496 | struct bgp_path_info *tmp_info; | |
497 | ||
498 | /* | |
499 | * We can bail out of this loop if all existing paths on the | |
500 | * multipath list have been visited (for cleanup purposes) and | |
501 | * the maxpath requirement is fulfulled | |
502 | */ | |
503 | if (!cur_mpath && (mpath_count >= maxpaths)) | |
504 | break; | |
505 | ||
506 | mp_next_node = mp_node ? listnextnode(mp_node) : NULL; | |
507 | next_mpath = | |
508 | cur_mpath ? bgp_path_info_mpath_next(cur_mpath) : NULL; | |
509 | tmp_info = mp_node ? listgetdata(mp_node) : NULL; | |
510 | ||
511 | if (debug) | |
512 | zlog_debug( | |
513 | "%s: comparing candidate %s with existing mpath %s", | |
514 | pfx_buf, | |
515 | tmp_info ? tmp_info->peer->host : "NONE", | |
516 | cur_mpath ? cur_mpath->peer->host : "NONE"); | |
517 | ||
518 | /* | |
519 | * If equal, the path was a multipath and is still a multipath. | |
520 | * Insert onto new multipath list if maxpaths allows. | |
521 | */ | |
522 | if (mp_node && (listgetdata(mp_node) == cur_mpath)) { | |
523 | list_delete_node(mp_list, mp_node); | |
524 | bgp_path_info_mpath_dequeue(cur_mpath); | |
525 | if ((mpath_count < maxpaths) | |
526 | && bgp_path_info_nexthop_cmp(prev_mpath, | |
527 | cur_mpath)) { | |
528 | bgp_path_info_mpath_enqueue(prev_mpath, | |
529 | cur_mpath); | |
530 | prev_mpath = cur_mpath; | |
531 | mpath_count++; | |
532 | if (debug) { | |
533 | bgp_path_info_path_with_addpath_rx_str( | |
534 | cur_mpath, path_buf); | |
535 | zlog_debug( | |
536 | "%s: %s is still multipath, cur count %d", | |
537 | pfx_buf, path_buf, mpath_count); | |
538 | } | |
539 | } else { | |
540 | mpath_changed = 1; | |
541 | if (debug) { | |
542 | bgp_path_info_path_with_addpath_rx_str( | |
543 | cur_mpath, path_buf); | |
544 | zlog_debug( | |
545 | "%s: remove mpath %s nexthop %s, cur count %d", | |
546 | pfx_buf, path_buf, | |
547 | inet_ntop(AF_INET, | |
548 | &cur_mpath->attr | |
549 | ->nexthop, | |
550 | nh_buf[0], | |
551 | sizeof(nh_buf[0])), | |
552 | mpath_count); | |
553 | } | |
554 | } | |
555 | mp_node = mp_next_node; | |
556 | cur_mpath = next_mpath; | |
557 | continue; | |
558 | } | |
559 | ||
560 | if (cur_mpath | |
561 | && (!mp_node | |
562 | || (bgp_path_info_mpath_cmp(cur_mpath, | |
563 | listgetdata(mp_node)) | |
564 | < 0))) { | |
565 | /* | |
566 | * If here, we have an old multipath and either the | |
567 | * mp_list | |
568 | * is finished or the next mp_node points to a later | |
569 | * multipath, so we need to purge this path from the | |
570 | * multipath list | |
571 | */ | |
572 | bgp_path_info_mpath_dequeue(cur_mpath); | |
573 | mpath_changed = 1; | |
574 | if (debug) { | |
575 | bgp_path_info_path_with_addpath_rx_str( | |
576 | cur_mpath, path_buf); | |
577 | zlog_debug( | |
578 | "%s: remove mpath %s nexthop %s, cur count %d", | |
579 | pfx_buf, path_buf, | |
580 | inet_ntop(AF_INET, | |
581 | &cur_mpath->attr->nexthop, | |
582 | nh_buf[0], sizeof(nh_buf[0])), | |
583 | mpath_count); | |
584 | } | |
585 | cur_mpath = next_mpath; | |
586 | } else { | |
587 | /* | |
588 | * If here, we have a path on the mp_list that was not | |
589 | * previously | |
590 | * a multipath (due to non-equivalance or maxpaths | |
591 | * exceeded), | |
592 | * or the matching multipath is sorted later in the | |
593 | * multipath | |
594 | * list. Before we enqueue the path on the new multipath | |
595 | * list, | |
596 | * make sure its not on the old_best multipath list or | |
597 | * referenced | |
598 | * via next_mpath: | |
599 | * - If next_mpath points to this new path, update | |
600 | * next_mpath to | |
601 | * point to the multipath after this one | |
602 | * - Dequeue the path from the multipath list just to | |
603 | * make sure | |
604 | */ | |
605 | new_mpath = listgetdata(mp_node); | |
606 | list_delete_node(mp_list, mp_node); | |
607 | assert(new_mpath); | |
608 | assert(prev_mpath); | |
609 | if ((mpath_count < maxpaths) && (new_mpath != new_best) | |
610 | && bgp_path_info_nexthop_cmp(prev_mpath, | |
611 | new_mpath)) { | |
612 | if (new_mpath == next_mpath) | |
613 | bgp_path_info_mpath_next(new_mpath); | |
614 | bgp_path_info_mpath_dequeue(new_mpath); | |
615 | ||
616 | bgp_path_info_mpath_enqueue(prev_mpath, | |
617 | new_mpath); | |
618 | prev_mpath = new_mpath; | |
619 | mpath_changed = 1; | |
620 | mpath_count++; | |
621 | if (debug) { | |
622 | bgp_path_info_path_with_addpath_rx_str( | |
623 | new_mpath, path_buf); | |
624 | zlog_debug( | |
625 | "%s: add mpath %s nexthop %s, cur count %d", | |
626 | pfx_buf, path_buf, | |
627 | inet_ntop(AF_INET, | |
628 | &new_mpath->attr | |
629 | ->nexthop, | |
630 | nh_buf[0], | |
631 | sizeof(nh_buf[0])), | |
632 | mpath_count); | |
633 | } | |
634 | } | |
635 | mp_node = mp_next_node; | |
636 | } | |
637 | } | |
638 | ||
639 | if (new_best) { | |
640 | if (debug) | |
641 | zlog_debug( | |
642 | "%s: New mpath count (incl newbest) %d mpath-change %s", | |
643 | pfx_buf, mpath_count, | |
644 | mpath_changed ? "YES" : "NO"); | |
645 | ||
646 | bgp_path_info_mpath_count_set(new_best, mpath_count - 1); | |
647 | if (mpath_changed | |
648 | || (bgp_path_info_mpath_count(new_best) != old_mpath_count)) | |
649 | SET_FLAG(new_best->flags, BGP_PATH_MULTIPATH_CHG); | |
650 | } | |
651 | } | |
652 | ||
653 | /* | |
654 | * bgp_mp_dmed_deselect | |
655 | * | |
656 | * Clean up multipath information for BGP_PATH_DMED_SELECTED path that | |
657 | * is not selected as best path | |
658 | */ | |
659 | void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best) | |
660 | { | |
661 | struct bgp_path_info *mpinfo, *mpnext; | |
662 | ||
663 | if (!dmed_best) | |
664 | return; | |
665 | ||
666 | for (mpinfo = bgp_path_info_mpath_first(dmed_best); mpinfo; | |
667 | mpinfo = mpnext) { | |
668 | mpnext = bgp_path_info_mpath_next(mpinfo); | |
669 | bgp_path_info_mpath_dequeue(mpinfo); | |
670 | } | |
671 | ||
672 | bgp_path_info_mpath_count_set(dmed_best, 0); | |
673 | UNSET_FLAG(dmed_best->flags, BGP_PATH_MULTIPATH_CHG); | |
674 | assert(bgp_path_info_mpath_first(dmed_best) == 0); | |
675 | } | |
676 | ||
/*
 * bgp_path_info_mpath_aggregate_update
 *
 * Set the multipath aggregate attribute. We need to see if the
 * aggregate has changed and then set the ATTR_CHANGED flag on the
 * bestpath info so that a peer update will be generated. The
 * change is detected by generating the current attribute,
 * interning it, and then comparing the interned pointer with the
 * current value. We can skip this generate/compare step if there
 * is no change in multipath selection and no attribute change in
 * any multipath.
 */
void bgp_path_info_mpath_aggregate_update(struct bgp_path_info *new_best,
					  struct bgp_path_info *old_best)
{
	struct bgp_path_info *mpinfo;
	struct aspath *aspath;
	struct aspath *asmerge;
	struct attr *new_attr, *old_attr;
	uint8_t origin;
	struct community *community, *commerge;
	struct ecommunity *ecomm, *ecommerge;
	struct lcommunity *lcomm, *lcommerge;
	struct attr attr = {0};

	/* A superseded bestpath releases its interned aggregate attr. */
	if (old_best && (old_best != new_best)
	    && (old_attr = bgp_path_info_mpath_attr(old_best))) {
		bgp_attr_unintern(&old_attr);
		bgp_path_info_mpath_attr_set(old_best, NULL);
	}

	if (!new_best)
		return;

	/* No multipaths: drop any stale aggregate and flag the change. */
	if (!bgp_path_info_mpath_count(new_best)) {
		if ((new_attr = bgp_path_info_mpath_attr(new_best))) {
			bgp_attr_unintern(&new_attr);
			bgp_path_info_mpath_attr_set(new_best, NULL);
			SET_FLAG(new_best->flags, BGP_PATH_ATTR_CHANGED);
		}
		return;
	}

	/* Start from a working copy of the bestpath's attributes. */
	bgp_attr_dup(&attr, new_best->attr);

	if (new_best->peer && bgp_flag_check(new_best->peer->bgp,
					     BGP_FLAG_MULTIPATH_RELAX_AS_SET)) {

		/* aggregate attribute from multipath constituents */
		aspath = aspath_dup(attr.aspath);
		origin = attr.origin;
		community =
			attr.community ? community_dup(attr.community) : NULL;
		ecomm = (attr.ecommunity) ? ecommunity_dup(attr.ecommunity)
					  : NULL;
		lcomm = (attr.lcommunity) ? lcommunity_dup(attr.lcommunity)
					  : NULL;

		/* Fold every multipath's AS path, origin, and community
		 * sets into the working copy. */
		for (mpinfo = bgp_path_info_mpath_first(new_best); mpinfo;
		     mpinfo = bgp_path_info_mpath_next(mpinfo)) {
			asmerge =
				aspath_aggregate(aspath, mpinfo->attr->aspath);
			aspath_free(aspath);
			aspath = asmerge;

			/* Keep the "worst" (numerically largest) origin. */
			if (origin < mpinfo->attr->origin)
				origin = mpinfo->attr->origin;

			if (mpinfo->attr->community) {
				if (community) {
					commerge = community_merge(
						community,
						mpinfo->attr->community);
					community =
						community_uniq_sort(commerge);
					community_free(&commerge);
				} else
					community = community_dup(
						mpinfo->attr->community);
			}

			if (mpinfo->attr->ecommunity) {
				if (ecomm) {
					ecommerge = ecommunity_merge(
						ecomm,
						mpinfo->attr->ecommunity);
					ecomm = ecommunity_uniq_sort(ecommerge);
					ecommunity_free(&ecommerge);
				} else
					ecomm = ecommunity_dup(
						mpinfo->attr->ecommunity);
			}
			if (mpinfo->attr->lcommunity) {
				if (lcomm) {
					lcommerge = lcommunity_merge(
						lcomm,
						mpinfo->attr->lcommunity);
					lcomm = lcommunity_uniq_sort(lcommerge);
					lcommunity_free(&lcommerge);
				} else
					lcomm = lcommunity_dup(
						mpinfo->attr->lcommunity);
			}
		}

		/* Install the aggregated values into the working copy. */
		attr.aspath = aspath;
		attr.origin = origin;
		if (community) {
			attr.community = community;
			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES);
		}
		if (ecomm) {
			attr.ecommunity = ecomm;
			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
		}
		if (lcomm) {
			attr.lcommunity = lcomm;
			attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES);
		}

		/* Zap multipath attr nexthop so we set nexthop to self */
		attr.nexthop.s_addr = 0;
		memset(&attr.mp_nexthop_global, 0, sizeof(struct in6_addr));

		/* TODO: should we set ATOMIC_AGGREGATE and AGGREGATOR? */
	}

	new_attr = bgp_attr_intern(&attr);

	/* Interned pointer comparison detects whether the aggregate
	 * actually changed; if not, drop the extra reference. */
	if (new_attr != bgp_path_info_mpath_attr(new_best)) {
		if ((old_attr = bgp_path_info_mpath_attr(new_best)))
			bgp_attr_unintern(&old_attr);
		bgp_path_info_mpath_attr_set(new_best, new_attr);
		SET_FLAG(new_best->flags, BGP_PATH_ATTR_CHANGED);
	} else
		bgp_attr_unintern(&new_attr);
}