3 * Copyright (C) 2010 Google Inc.
5 * This file is part of Quagga
7 * Quagga is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2, or (at your option) any
12 * Quagga is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License along
18 * with this program; see the file COPYING; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 #include "sockunion.h"
32 #include "bgpd/bgpd.h"
33 #include "bgpd/bgp_table.h"
34 #include "bgpd/bgp_route.h"
35 #include "bgpd/bgp_attr.h"
36 #include "bgpd/bgp_debug.h"
37 #include "bgpd/bgp_aspath.h"
38 #include "bgpd/bgp_community.h"
39 #include "bgpd/bgp_ecommunity.h"
40 #include "bgpd/bgp_lcommunity.h"
41 #include "bgpd/bgp_mpath.h"
44 * bgp_maximum_paths_set
46 * Record maximum-paths configuration for BGP instance
48 int bgp_maximum_paths_set(struct bgp
*bgp
, afi_t afi
, safi_t safi
, int peertype
,
49 uint16_t maxpaths
, bool same_clusterlen
)
51 if (!bgp
|| (afi
>= AFI_MAX
) || (safi
>= SAFI_MAX
))
56 bgp
->maxpaths
[afi
][safi
].maxpaths_ibgp
= maxpaths
;
57 bgp
->maxpaths
[afi
][safi
].same_clusterlen
= same_clusterlen
;
60 bgp
->maxpaths
[afi
][safi
].maxpaths_ebgp
= maxpaths
;
70 * bgp_maximum_paths_unset
72 * Remove maximum-paths configuration from BGP instance
74 int bgp_maximum_paths_unset(struct bgp
*bgp
, afi_t afi
, safi_t safi
,
77 if (!bgp
|| (afi
>= AFI_MAX
) || (safi
>= SAFI_MAX
))
82 bgp
->maxpaths
[afi
][safi
].maxpaths_ibgp
= multipath_num
;
83 bgp
->maxpaths
[afi
][safi
].same_clusterlen
= false;
86 bgp
->maxpaths
[afi
][safi
].maxpaths_ebgp
= multipath_num
;
98 * Return true if ifindex for ifp1 and ifp2 are the same, else return false.
100 static int bgp_interface_same(struct interface
*ifp1
, struct interface
*ifp2
)
111 return (ifp1
->ifindex
== ifp2
->ifindex
);
116 * bgp_path_info_nexthop_cmp
118 * Compare the nexthops of two paths. Return value is less than, equal to,
119 * or greater than zero if bpi1 is respectively less than, equal to,
120 * or greater than bpi2.
122 int bgp_path_info_nexthop_cmp(struct bgp_path_info
*bpi1
,
123 struct bgp_path_info
*bpi2
)
126 struct in6_addr addr1
, addr2
;
128 compare
= IPV4_ADDR_CMP(&bpi1
->attr
->nexthop
, &bpi2
->attr
->nexthop
);
130 if (bpi1
->attr
->mp_nexthop_len
== bpi2
->attr
->mp_nexthop_len
) {
131 switch (bpi1
->attr
->mp_nexthop_len
) {
132 case BGP_ATTR_NHLEN_IPV4
:
133 case BGP_ATTR_NHLEN_VPNV4
:
134 compare
= IPV4_ADDR_CMP(
135 &bpi1
->attr
->mp_nexthop_global_in
,
136 &bpi2
->attr
->mp_nexthop_global_in
);
138 case BGP_ATTR_NHLEN_IPV6_GLOBAL
:
139 case BGP_ATTR_NHLEN_VPNV6_GLOBAL
:
140 compare
= IPV6_ADDR_CMP(
141 &bpi1
->attr
->mp_nexthop_global
,
142 &bpi2
->attr
->mp_nexthop_global
);
144 case BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
:
147 BGP_ATTR_NH_MP_PREFER_GLOBAL
))
148 ? bpi1
->attr
->mp_nexthop_global
149 : bpi1
->attr
->mp_nexthop_local
;
152 BGP_ATTR_NH_MP_PREFER_GLOBAL
))
153 ? bpi2
->attr
->mp_nexthop_global
154 : bpi2
->attr
->mp_nexthop_local
;
156 if (!CHECK_FLAG(bpi1
->attr
->nh_flag
,
157 BGP_ATTR_NH_MP_PREFER_GLOBAL
) &&
158 !CHECK_FLAG(bpi2
->attr
->nh_flag
,
159 BGP_ATTR_NH_MP_PREFER_GLOBAL
))
160 compare
= !bgp_interface_same(
165 compare
= IPV6_ADDR_CMP(&addr1
, &addr2
);
170 /* This can happen if one IPv6 peer sends you global and
172 * nexthops but another IPv6 peer only sends you global
174 else if (bpi1
->attr
->mp_nexthop_len
175 == BGP_ATTR_NHLEN_IPV6_GLOBAL
176 || bpi1
->attr
->mp_nexthop_len
177 == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
) {
178 compare
= IPV6_ADDR_CMP(&bpi1
->attr
->mp_nexthop_global
,
179 &bpi2
->attr
->mp_nexthop_global
);
181 if (bpi1
->attr
->mp_nexthop_len
182 < bpi2
->attr
->mp_nexthop_len
)
191 * If both nexthops are same then check
192 * if they belong to same VRF
194 if (!compare
&& bpi1
->attr
->nh_type
!= NEXTHOP_TYPE_BLACKHOLE
) {
195 if (bpi1
->extra
&& bpi1
->extra
->bgp_orig
&& bpi2
->extra
196 && bpi2
->extra
->bgp_orig
) {
197 if (bpi1
->extra
->bgp_orig
->vrf_id
198 != bpi2
->extra
->bgp_orig
->vrf_id
) {
208 * bgp_path_info_mpath_cmp
210 * This function determines our multipath list ordering. By ordering
211 * the list we can deterministically select which paths are included
212 * in the multipath set. The ordering also helps in detecting changes
213 * in the multipath selection so we can detect whether to send an
216 * The order of paths is determined first by received nexthop, and then
217 * by peer address if the nexthops are the same.
219 static int bgp_path_info_mpath_cmp(void *val1
, void *val2
)
221 struct bgp_path_info
*bpi1
, *bpi2
;
227 compare
= bgp_path_info_nexthop_cmp(bpi1
, bpi2
);
230 if (!bpi1
->peer
->su_remote
&& !bpi2
->peer
->su_remote
)
232 else if (!bpi1
->peer
->su_remote
)
234 else if (!bpi2
->peer
->su_remote
)
237 compare
= sockunion_cmp(bpi1
->peer
->su_remote
,
238 bpi2
->peer
->su_remote
);
247 * Initialize the mp_list, which holds the list of multipaths
248 * selected by bgp_best_selection
250 void bgp_mp_list_init(struct list
*mp_list
)
253 memset(mp_list
, 0, sizeof(struct list
));
254 mp_list
->cmp
= bgp_path_info_mpath_cmp
;
/*
 * bgp_mp_list_clear
 *
 * Clears all entries out of the mp_list
 */
void bgp_mp_list_clear(struct list *mp_list)
{
	list_delete_all_node(mp_list);
}
/*
 * bgp_mp_list_add
 *
 * Adds a multipath entry to the mp_list in sorted position
 * (ordering defined by bgp_path_info_mpath_cmp).
 */
void bgp_mp_list_add(struct list *mp_list, struct bgp_path_info *mpinfo)
{
	assert(mp_list && mpinfo);
	listnode_add_sort(mp_list, mpinfo);
}
280 * bgp_path_info_mpath_new
282 * Allocate and zero memory for a new bgp_path_info_mpath element
284 static struct bgp_path_info_mpath
*bgp_path_info_mpath_new(void)
286 struct bgp_path_info_mpath
*new_mpath
;
288 new_mpath
= XCALLOC(MTYPE_BGP_MPATH_INFO
,
289 sizeof(struct bgp_path_info_mpath
));
295 * bgp_path_info_mpath_free
297 * Release resources for a bgp_path_info_mpath element and zero out pointer
299 void bgp_path_info_mpath_free(struct bgp_path_info_mpath
**mpath
)
301 if (mpath
&& *mpath
) {
302 if ((*mpath
)->mp_attr
)
303 bgp_attr_unintern(&(*mpath
)->mp_attr
);
304 XFREE(MTYPE_BGP_MPATH_INFO
, *mpath
);
309 * bgp_path_info_mpath_get
311 * Fetch the mpath element for the given bgp_path_info. Used for
312 * doing lazy allocation.
314 static struct bgp_path_info_mpath
*
315 bgp_path_info_mpath_get(struct bgp_path_info
*path
)
317 struct bgp_path_info_mpath
*mpath
;
323 mpath
= bgp_path_info_mpath_new();
325 mpath
->mp_info
= path
;
331 * bgp_path_info_mpath_enqueue
333 * Enqueue a path onto the multipath list given the previous multipath
336 static void bgp_path_info_mpath_enqueue(struct bgp_path_info
*prev_info
,
337 struct bgp_path_info
*path
)
339 struct bgp_path_info_mpath
*prev
, *mpath
;
341 prev
= bgp_path_info_mpath_get(prev_info
);
342 mpath
= bgp_path_info_mpath_get(path
);
346 mpath
->mp_next
= prev
->mp_next
;
347 mpath
->mp_prev
= prev
;
349 prev
->mp_next
->mp_prev
= mpath
;
350 prev
->mp_next
= mpath
;
352 SET_FLAG(path
->flags
, BGP_PATH_MULTIPATH
);
356 * bgp_path_info_mpath_dequeue
358 * Remove a path from the multipath list
360 void bgp_path_info_mpath_dequeue(struct bgp_path_info
*path
)
362 struct bgp_path_info_mpath
*mpath
= path
->mpath
;
366 mpath
->mp_prev
->mp_next
= mpath
->mp_next
;
368 mpath
->mp_next
->mp_prev
= mpath
->mp_prev
;
369 mpath
->mp_next
= mpath
->mp_prev
= NULL
;
370 UNSET_FLAG(path
->flags
, BGP_PATH_MULTIPATH
);
374 * bgp_path_info_mpath_next
376 * Given a bgp_path_info, return the next multipath entry
378 struct bgp_path_info
*bgp_path_info_mpath_next(struct bgp_path_info
*path
)
380 if (!path
->mpath
|| !path
->mpath
->mp_next
)
382 return path
->mpath
->mp_next
->mp_info
;
/*
 * bgp_path_info_mpath_first
 *
 * Given bestpath bgp_path_info, return the first multipath entry
 * (the bestpath itself is not part of the multipath list).
 */
struct bgp_path_info *bgp_path_info_mpath_first(struct bgp_path_info *path)
{
	return bgp_path_info_mpath_next(path);
}
396 * bgp_path_info_mpath_count
398 * Given the bestpath bgp_path_info, return the number of multipath entries
400 uint32_t bgp_path_info_mpath_count(struct bgp_path_info
*path
)
404 return path
->mpath
->mp_count
;
408 * bgp_path_info_mpath_count_set
410 * Sets the count of multipaths into bestpath's mpath element
412 static void bgp_path_info_mpath_count_set(struct bgp_path_info
*path
,
415 struct bgp_path_info_mpath
*mpath
;
416 if (!count
&& !path
->mpath
)
418 mpath
= bgp_path_info_mpath_get(path
);
421 mpath
->mp_count
= count
;
425 * bgp_path_info_mpath_lb_update
427 * Update cumulative info related to link-bandwidth
429 static void bgp_path_info_mpath_lb_update(struct bgp_path_info
*path
, bool set
,
430 bool all_paths_lb
, uint64_t cum_bw
)
432 struct bgp_path_info_mpath
*mpath
;
436 if (!set
|| (cum_bw
== 0 && !all_paths_lb
))
439 mpath
= bgp_path_info_mpath_get(path
);
445 SET_FLAG(mpath
->mp_flags
, BGP_MP_LB_PRESENT
);
447 UNSET_FLAG(mpath
->mp_flags
, BGP_MP_LB_PRESENT
);
449 SET_FLAG(mpath
->mp_flags
, BGP_MP_LB_ALL
);
451 UNSET_FLAG(mpath
->mp_flags
, BGP_MP_LB_ALL
);
452 mpath
->cum_bw
= cum_bw
;
460 * bgp_path_info_mpath_attr
462 * Given bestpath bgp_path_info, return aggregated attribute set used
463 * for advertising the multipath route
465 struct attr
*bgp_path_info_mpath_attr(struct bgp_path_info
*path
)
469 return path
->mpath
->mp_attr
;
473 * bgp_path_info_chkwtd
475 * Return if we should attempt to do weighted ECMP or not
476 * The path passed in is the bestpath.
478 bool bgp_path_info_mpath_chkwtd(struct bgp
*bgp
, struct bgp_path_info
*path
)
480 /* Check if told to ignore weights or not multipath */
481 if (bgp
->lb_handling
== BGP_LINK_BW_IGNORE_BW
|| !path
->mpath
)
484 /* All paths in multipath should have associated weight (bandwidth)
485 * unless told explicitly otherwise.
487 if (bgp
->lb_handling
!= BGP_LINK_BW_SKIP_MISSING
&&
488 bgp
->lb_handling
!= BGP_LINK_BW_DEFWT_4_MISSING
)
489 return (path
->mpath
->mp_flags
& BGP_MP_LB_ALL
);
491 /* At least one path should have bandwidth. */
492 return (path
->mpath
->mp_flags
& BGP_MP_LB_PRESENT
);
496 * bgp_path_info_mpath_attr
498 * Given bestpath bgp_path_info, return cumulative bandwidth
499 * computed for all multipaths with bandwidth info
501 uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info
*path
)
505 return path
->mpath
->cum_bw
;
509 * bgp_path_info_mpath_attr_set
511 * Sets the aggregated attribute into bestpath's mpath element
513 static void bgp_path_info_mpath_attr_set(struct bgp_path_info
*path
,
516 struct bgp_path_info_mpath
*mpath
;
517 if (!attr
&& !path
->mpath
)
519 mpath
= bgp_path_info_mpath_get(path
);
522 mpath
->mp_attr
= attr
;
526 * bgp_path_info_mpath_update
528 * Compare and sync up the multipath list with the mp_list generated by
531 void bgp_path_info_mpath_update(struct bgp
*bgp
, struct bgp_dest
*dest
,
532 struct bgp_path_info
*new_best
,
533 struct bgp_path_info
*old_best
,
534 struct list
*mp_list
,
535 struct bgp_maxpaths_cfg
*mpath_cfg
)
537 uint16_t maxpaths
, mpath_count
, old_mpath_count
;
539 uint64_t cum_bw
, old_cum_bw
;
540 struct listnode
*mp_node
, *mp_next_node
;
541 struct bgp_path_info
*cur_mpath
, *new_mpath
, *next_mpath
, *prev_mpath
;
542 int mpath_changed
, debug
;
544 char path_buf
[PATH_ADDPATH_STR_BUFFER
];
547 maxpaths
= multipath_num
;
551 old_cum_bw
= cum_bw
= 0;
552 prev_mpath
= new_best
;
553 mp_node
= listhead(mp_list
);
554 debug
= bgp_debug_bestpath(dest
);
558 if (new_best
!= old_best
)
559 bgp_path_info_mpath_dequeue(new_best
);
560 maxpaths
= (new_best
->peer
->sort
== BGP_PEER_IBGP
)
561 ? mpath_cfg
->maxpaths_ibgp
562 : mpath_cfg
->maxpaths_ebgp
;
566 cur_mpath
= bgp_path_info_mpath_first(old_best
);
567 old_mpath_count
= bgp_path_info_mpath_count(old_best
);
568 old_cum_bw
= bgp_path_info_mpath_cumbw(old_best
);
569 bgp_path_info_mpath_count_set(old_best
, 0);
570 bgp_path_info_mpath_lb_update(old_best
, false, false, 0);
571 bgp_path_info_mpath_dequeue(old_best
);
576 "%pRN(%s): starting mpath update, newbest %s num candidates %d old-mpath-count %d old-cum-bw %" PRIu64
,
577 bgp_dest_to_rnode(dest
), bgp
->name_pretty
,
578 new_best
? new_best
->peer
->host
: "NONE",
579 mp_list
? listcount(mp_list
) : 0, old_mpath_count
,
583 * We perform an ordered walk through both lists in parallel.
584 * The reason for the ordered walk is that if there are paths
585 * that were previously multipaths and are still multipaths, the walk
586 * should encounter them in both lists at the same time. Otherwise
587 * there will be paths that are in one list or another, and we
588 * will deal with these separately.
590 * Note that new_best might be somewhere in the mp_list, so we need
593 all_paths_lb
= true; /* We'll reset if any path doesn't have LB. */
594 while (mp_node
|| cur_mpath
) {
595 struct bgp_path_info
*tmp_info
;
598 * We can bail out of this loop if all existing paths on the
599 * multipath list have been visited (for cleanup purposes) and
600 * the maxpath requirement is fulfulled
602 if (!cur_mpath
&& (mpath_count
>= maxpaths
))
605 mp_next_node
= mp_node
? listnextnode(mp_node
) : NULL
;
607 cur_mpath
? bgp_path_info_mpath_next(cur_mpath
) : NULL
;
608 tmp_info
= mp_node
? listgetdata(mp_node
) : NULL
;
612 "%pRN(%s): comparing candidate %s with existing mpath %s",
613 bgp_dest_to_rnode(dest
), bgp
->name_pretty
,
614 tmp_info
? tmp_info
->peer
->host
: "NONE",
615 cur_mpath
? cur_mpath
->peer
->host
: "NONE");
618 * If equal, the path was a multipath and is still a multipath.
619 * Insert onto new multipath list if maxpaths allows.
621 if (mp_node
&& (listgetdata(mp_node
) == cur_mpath
)) {
622 list_delete_node(mp_list
, mp_node
);
623 bgp_path_info_mpath_dequeue(cur_mpath
);
624 if ((mpath_count
< maxpaths
)
626 && bgp_path_info_nexthop_cmp(prev_mpath
,
628 bgp_path_info_mpath_enqueue(prev_mpath
,
630 prev_mpath
= cur_mpath
;
632 if (ecommunity_linkbw_present(
633 bgp_attr_get_ecommunity(
638 all_paths_lb
= false;
640 bgp_path_info_path_with_addpath_rx_str(
644 "%pRN: %s is still multipath, cur count %d",
645 bgp_dest_to_rnode(dest
),
646 path_buf
, mpath_count
);
651 bgp_path_info_path_with_addpath_rx_str(
655 "%pRN: remove mpath %s nexthop %pI4, cur count %d",
656 bgp_dest_to_rnode(dest
),
658 &cur_mpath
->attr
->nexthop
,
662 mp_node
= mp_next_node
;
663 cur_mpath
= next_mpath
;
669 || (bgp_path_info_mpath_cmp(cur_mpath
,
670 listgetdata(mp_node
))
673 * If here, we have an old multipath and either the
675 * is finished or the next mp_node points to a later
676 * multipath, so we need to purge this path from the
679 bgp_path_info_mpath_dequeue(cur_mpath
);
682 bgp_path_info_path_with_addpath_rx_str(
683 cur_mpath
, path_buf
, sizeof(path_buf
));
685 "%pRN: remove mpath %s nexthop %pI4, cur count %d",
686 bgp_dest_to_rnode(dest
), path_buf
,
687 &cur_mpath
->attr
->nexthop
, mpath_count
);
689 cur_mpath
= next_mpath
;
692 * If here, we have a path on the mp_list that was not
694 * a multipath (due to non-equivalance or maxpaths
696 * or the matching multipath is sorted later in the
698 * list. Before we enqueue the path on the new multipath
700 * make sure its not on the old_best multipath list or
703 * - If next_mpath points to this new path, update
705 * point to the multipath after this one
706 * - Dequeue the path from the multipath list just to
709 new_mpath
= listgetdata(mp_node
);
710 list_delete_node(mp_list
, mp_node
);
713 if ((mpath_count
< maxpaths
) && (new_mpath
!= new_best
)
714 && bgp_path_info_nexthop_cmp(prev_mpath
,
716 bgp_path_info_mpath_dequeue(new_mpath
);
718 bgp_path_info_mpath_enqueue(prev_mpath
,
720 prev_mpath
= new_mpath
;
723 if (ecommunity_linkbw_present(
724 bgp_attr_get_ecommunity(
729 all_paths_lb
= false;
731 bgp_path_info_path_with_addpath_rx_str(
735 "%pRN: add mpath %s nexthop %pI4, cur count %d",
736 bgp_dest_to_rnode(dest
),
738 &new_mpath
->attr
->nexthop
,
742 mp_node
= mp_next_node
;
747 bgp_path_info_mpath_count_set(new_best
, mpath_count
- 1);
748 if (mpath_count
<= 1 ||
749 !ecommunity_linkbw_present(
750 bgp_attr_get_ecommunity(new_best
->attr
), &bwval
))
751 all_paths_lb
= false;
754 bgp_path_info_mpath_lb_update(new_best
, true,
755 all_paths_lb
, cum_bw
);
759 "%pRN(%s): New mpath count (incl newbest) %d mpath-change %s all_paths_lb %d cum_bw %" PRIu64
,
760 bgp_dest_to_rnode(dest
), bgp
->name_pretty
,
761 mpath_count
, mpath_changed
? "YES" : "NO",
762 all_paths_lb
, cum_bw
);
765 || (bgp_path_info_mpath_count(new_best
) != old_mpath_count
))
766 SET_FLAG(new_best
->flags
, BGP_PATH_MULTIPATH_CHG
);
767 if ((mpath_count
- 1) != old_mpath_count
||
768 old_cum_bw
!= cum_bw
)
769 SET_FLAG(new_best
->flags
, BGP_PATH_LINK_BW_CHG
);
774 * bgp_mp_dmed_deselect
776 * Clean up multipath information for BGP_PATH_DMED_SELECTED path that
777 * is not selected as best path
779 void bgp_mp_dmed_deselect(struct bgp_path_info
*dmed_best
)
781 struct bgp_path_info
*mpinfo
, *mpnext
;
786 for (mpinfo
= bgp_path_info_mpath_first(dmed_best
); mpinfo
;
788 mpnext
= bgp_path_info_mpath_next(mpinfo
);
789 bgp_path_info_mpath_dequeue(mpinfo
);
792 bgp_path_info_mpath_count_set(dmed_best
, 0);
793 UNSET_FLAG(dmed_best
->flags
, BGP_PATH_MULTIPATH_CHG
);
794 UNSET_FLAG(dmed_best
->flags
, BGP_PATH_LINK_BW_CHG
);
795 assert(bgp_path_info_mpath_first(dmed_best
) == NULL
);
799 * bgp_path_info_mpath_aggregate_update
801 * Set the multipath aggregate attribute. We need to see if the
802 * aggregate has changed and then set the ATTR_CHANGED flag on the
803 * bestpath info so that a peer update will be generated. The
804 * change is detected by generating the current attribute,
805 * interning it, and then comparing the interned pointer with the
806 * current value. We can skip this generate/compare step if there
807 * is no change in multipath selection and no attribute change in
810 void bgp_path_info_mpath_aggregate_update(struct bgp_path_info
*new_best
,
811 struct bgp_path_info
*old_best
)
813 struct bgp_path_info
*mpinfo
;
814 struct aspath
*aspath
;
815 struct aspath
*asmerge
;
816 struct attr
*new_attr
, *old_attr
;
818 struct community
*community
, *commerge
;
819 struct ecommunity
*ecomm
, *ecommerge
;
820 struct lcommunity
*lcomm
, *lcommerge
;
821 struct attr attr
= {0};
823 if (old_best
&& (old_best
!= new_best
)
824 && (old_attr
= bgp_path_info_mpath_attr(old_best
))) {
825 bgp_attr_unintern(&old_attr
);
826 bgp_path_info_mpath_attr_set(old_best
, NULL
);
832 if (!bgp_path_info_mpath_count(new_best
)) {
833 if ((new_attr
= bgp_path_info_mpath_attr(new_best
))) {
834 bgp_attr_unintern(&new_attr
);
835 bgp_path_info_mpath_attr_set(new_best
, NULL
);
836 SET_FLAG(new_best
->flags
, BGP_PATH_ATTR_CHANGED
);
841 attr
= *new_best
->attr
;
844 && CHECK_FLAG(new_best
->peer
->bgp
->flags
,
845 BGP_FLAG_MULTIPATH_RELAX_AS_SET
)) {
847 /* aggregate attribute from multipath constituents */
848 aspath
= aspath_dup(attr
.aspath
);
849 origin
= attr
.origin
;
851 bgp_attr_get_community(&attr
)
852 ? community_dup(bgp_attr_get_community(&attr
))
854 ecomm
= (bgp_attr_get_ecommunity(&attr
))
855 ? ecommunity_dup(bgp_attr_get_ecommunity(&attr
))
857 lcomm
= (bgp_attr_get_lcommunity(&attr
))
858 ? lcommunity_dup(bgp_attr_get_lcommunity(&attr
))
861 for (mpinfo
= bgp_path_info_mpath_first(new_best
); mpinfo
;
862 mpinfo
= bgp_path_info_mpath_next(mpinfo
)) {
864 aspath_aggregate(aspath
, mpinfo
->attr
->aspath
);
868 if (origin
< mpinfo
->attr
->origin
)
869 origin
= mpinfo
->attr
->origin
;
871 if (bgp_attr_get_community(mpinfo
->attr
)) {
873 commerge
= community_merge(
875 bgp_attr_get_community(
878 community_uniq_sort(commerge
);
879 community_free(&commerge
);
881 community
= community_dup(
882 bgp_attr_get_community(
886 if (bgp_attr_get_ecommunity(mpinfo
->attr
)) {
888 ecommerge
= ecommunity_merge(
889 ecomm
, bgp_attr_get_ecommunity(
891 ecomm
= ecommunity_uniq_sort(ecommerge
);
892 ecommunity_free(&ecommerge
);
894 ecomm
= ecommunity_dup(
895 bgp_attr_get_ecommunity(
898 if (bgp_attr_get_lcommunity(mpinfo
->attr
)) {
900 lcommerge
= lcommunity_merge(
901 lcomm
, bgp_attr_get_lcommunity(
903 lcomm
= lcommunity_uniq_sort(lcommerge
);
904 lcommunity_free(&lcommerge
);
906 lcomm
= lcommunity_dup(
907 bgp_attr_get_lcommunity(
912 attr
.aspath
= aspath
;
913 attr
.origin
= origin
;
915 bgp_attr_set_community(&attr
, community
);
917 bgp_attr_set_ecommunity(&attr
, ecomm
);
919 bgp_attr_set_lcommunity(&attr
, lcomm
);
921 /* Zap multipath attr nexthop so we set nexthop to self */
922 attr
.nexthop
.s_addr
= INADDR_ANY
;
923 memset(&attr
.mp_nexthop_global
, 0, sizeof(struct in6_addr
));
925 /* TODO: should we set ATOMIC_AGGREGATE and AGGREGATOR? */
928 new_attr
= bgp_attr_intern(&attr
);
930 if (new_attr
!= bgp_path_info_mpath_attr(new_best
)) {
931 if ((old_attr
= bgp_path_info_mpath_attr(new_best
)))
932 bgp_attr_unintern(&old_attr
);
933 bgp_path_info_mpath_attr_set(new_best
, new_attr
);
934 SET_FLAG(new_best
->flags
, BGP_PATH_ATTR_CHANGED
);
936 bgp_attr_unintern(&new_attr
);