// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Addpath TX ID selection, and related utilities
 * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
 */
11 #include "bgp_addpath.h"
12 #include "bgp_route.h"
14 static const struct bgp_addpath_strategy_names strat_names
[BGP_ADDPATH_MAX
] = {
16 .config_name
= "addpath-tx-all-paths",
18 .human_description
= "Advertise all paths via addpath",
19 .type_json_name
= "addpathTxAllPaths",
20 .id_json_name
= "addpathTxIdAll"
23 .config_name
= "addpath-tx-bestpath-per-AS",
24 .human_name
= "Best-Per-AS",
25 .human_description
= "Advertise bestpath per AS via addpath",
26 .type_json_name
= "addpathTxBestpathPerAS",
27 .id_json_name
= "addpathTxIdBestPerAS"
31 static const struct bgp_addpath_strategy_names unknown_names
= {
32 .config_name
= "addpath-tx-unknown",
33 .human_name
= "Unknown-Addpath-Strategy",
34 .human_description
= "Unknown Addpath Strategy",
35 .type_json_name
= "addpathTxUnknown",
36 .id_json_name
= "addpathTxIdUnknown"
40 * Returns a structure full of strings associated with an addpath type. Will
43 const struct bgp_addpath_strategy_names
*
44 bgp_addpath_names(enum bgp_addpath_strat strat
)
46 if (strat
< BGP_ADDPATH_MAX
)
47 return &(strat_names
[strat
]);
49 return &unknown_names
;
53 * Returns if any peer is transmitting addpaths for a given afi/safi.
55 bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data
*d
, afi_t afi
,
58 return d
->total_peercount
[afi
][safi
] > 0;
62 * Initialize the BGP instance level data for addpath.
64 void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data
*d
)
70 FOREACH_AFI_SAFI (afi
, safi
) {
71 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
72 d
->id_allocators
[afi
][safi
][i
] = NULL
;
73 d
->peercount
[afi
][safi
][i
] = 0;
75 d
->total_peercount
[afi
][safi
] = 0;
80 * Free up resources associated with BGP route info structures.
82 void bgp_addpath_free_info_data(struct bgp_addpath_info_data
*d
,
83 struct bgp_addpath_node_data
*nd
)
87 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
88 if (d
->addpath_tx_id
[i
] != IDALLOC_INVALID
)
89 idalloc_free_to_pool(&nd
->free_ids
[i
],
95 * Return the addpath ID used to send a particular route, to a particular peer,
96 * in a particular AFI/SAFI.
98 uint32_t bgp_addpath_id_for_peer(struct peer
*peer
, afi_t afi
, safi_t safi
,
99 struct bgp_addpath_info_data
*d
)
101 if (safi
== SAFI_LABELED_UNICAST
)
104 if (peer
->addpath_type
[afi
][safi
] < BGP_ADDPATH_MAX
)
105 return d
->addpath_tx_id
[peer
->addpath_type
[afi
][safi
]];
107 return IDALLOC_INVALID
;
111 * Returns true if the path has an assigned addpath ID for any of the addpath
114 bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data
*d
)
118 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++)
119 if (d
->addpath_tx_id
[i
] != 0)
126 * Releases any ID's associated with the BGP prefix.
128 void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data
*bd
,
129 struct bgp_addpath_node_data
*nd
, afi_t afi
,
134 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
135 idalloc_drain_pool(bd
->id_allocators
[afi
][safi
][i
],
141 * Check to see if the addpath strategy requires DMED to be configured to work.
143 bool bgp_addpath_dmed_required(int strategy
)
145 return strategy
== BGP_ADDPATH_BEST_PER_AS
;
149 * Return true if this is a path we should advertise due to a
150 * configured addpath-tx knob
152 bool bgp_addpath_tx_path(enum bgp_addpath_strat strat
, struct bgp_path_info
*pi
)
155 case BGP_ADDPATH_NONE
:
157 case BGP_ADDPATH_ALL
:
159 case BGP_ADDPATH_BEST_PER_AS
:
160 if (CHECK_FLAG(pi
->flags
, BGP_PATH_DMED_SELECTED
))
164 case BGP_ADDPATH_MAX
:
168 assert(!"Reached end of function we should never hit");
171 static void bgp_addpath_flush_type_rn(struct bgp
*bgp
, afi_t afi
, safi_t safi
,
172 enum bgp_addpath_strat addpath_type
,
173 struct bgp_dest
*dest
)
175 struct bgp_path_info
*pi
;
177 if (safi
== SAFI_LABELED_UNICAST
)
181 bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
],
182 &(dest
->tx_addpath
.free_ids
[addpath_type
]));
183 for (pi
= bgp_dest_get_bgp_path_info(dest
); pi
; pi
= pi
->next
) {
184 if (pi
->tx_addpath
.addpath_tx_id
[addpath_type
]
185 != IDALLOC_INVALID
) {
188 .id_allocators
[afi
][safi
][addpath_type
],
189 pi
->tx_addpath
.addpath_tx_id
[addpath_type
]);
190 pi
->tx_addpath
.addpath_tx_id
[addpath_type
] =
197 * Purge all addpath ID's on a BGP instance associated with the addpath
198 * strategy, and afi/safi combination. This lets us let go of all memory held to
199 * track ID numbers associated with an addpath type not in use. Since
200 * post-bestpath ID processing is skipped for types not used, this is the only
201 * chance to free this data.
203 static void bgp_addpath_flush_type(struct bgp
*bgp
, afi_t afi
, safi_t safi
,
204 enum bgp_addpath_strat addpath_type
)
206 struct bgp_dest
*dest
, *ndest
;
208 if (safi
== SAFI_LABELED_UNICAST
)
211 for (dest
= bgp_table_top(bgp
->rib
[afi
][safi
]); dest
;
212 dest
= bgp_route_next(dest
)) {
213 if (safi
== SAFI_MPLS_VPN
) {
214 struct bgp_table
*table
;
216 table
= bgp_dest_get_bgp_table_info(dest
);
220 for (ndest
= bgp_table_top(table
); ndest
;
221 ndest
= bgp_route_next(ndest
))
222 bgp_addpath_flush_type_rn(bgp
, afi
, safi
,
223 addpath_type
, ndest
);
225 bgp_addpath_flush_type_rn(bgp
, afi
, safi
, addpath_type
,
230 idalloc_destroy(bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
]);
231 bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
] = NULL
;
235 * Allocate an Addpath ID for the given type on a path, if necessary.
237 static void bgp_addpath_populate_path(struct id_alloc
*allocator
,
238 struct bgp_path_info
*path
,
239 enum bgp_addpath_strat addpath_type
)
241 if (bgp_addpath_tx_path(addpath_type
, path
)) {
242 path
->tx_addpath
.addpath_tx_id
[addpath_type
] =
243 idalloc_allocate(allocator
);
248 * Compute addpath ID's on a BGP instance associated with the addpath strategy,
249 * and afi/safi combination. Since we won't waste the time computing addpath IDs
250 * for unused strategies, the first time a peer is configured to use a strategy,
251 * we have to backfill the data.
252 * In labeled-unicast, addpath allocations SHOULD be done in unicast SAFI.
254 static void bgp_addpath_populate_type(struct bgp
*bgp
, afi_t afi
, safi_t safi
,
255 enum bgp_addpath_strat addpath_type
)
257 struct bgp_dest
*dest
, *ndest
;
259 struct id_alloc
*allocator
;
261 if (safi
== SAFI_LABELED_UNICAST
)
264 snprintf(buf
, sizeof(buf
), "Addpath ID Allocator %s:%d/%d",
265 bgp_addpath_names(addpath_type
)->config_name
, (int)afi
,
267 buf
[sizeof(buf
) - 1] = '\0';
268 zlog_info("Computing addpath IDs for addpath type %s",
269 bgp_addpath_names(addpath_type
)->human_name
);
271 bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
] =
274 idalloc_reserve(bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
],
275 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE
);
277 allocator
= bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
];
279 for (dest
= bgp_table_top(bgp
->rib
[afi
][safi
]); dest
;
280 dest
= bgp_route_next(dest
)) {
281 struct bgp_path_info
*bi
;
283 if (safi
== SAFI_MPLS_VPN
) {
284 struct bgp_table
*table
;
286 table
= bgp_dest_get_bgp_table_info(dest
);
290 for (ndest
= bgp_table_top(table
); ndest
;
291 ndest
= bgp_route_next(ndest
))
292 for (bi
= bgp_dest_get_bgp_path_info(ndest
); bi
;
294 bgp_addpath_populate_path(allocator
, bi
,
297 for (bi
= bgp_dest_get_bgp_path_info(dest
); bi
;
299 bgp_addpath_populate_path(allocator
, bi
,
306 * Handle updates to a peer or group's addpath strategy. If after adjusting
307 * counts a addpath strategy is in use for the first time, or no longer in use,
308 * the IDs for that strategy will be populated or flushed.
310 void bgp_addpath_type_changed(struct bgp
*bgp
)
314 struct listnode
*node
, *nnode
;
316 int peer_count
[AFI_MAX
][SAFI_MAX
][BGP_ADDPATH_MAX
];
317 enum bgp_addpath_strat type
;
319 FOREACH_AFI_SAFI(afi
, safi
) {
320 for (type
=0; type
<BGP_ADDPATH_MAX
; type
++) {
321 peer_count
[afi
][safi
][type
] = 0;
323 bgp
->tx_addpath
.total_peercount
[afi
][safi
] = 0;
326 for (ALL_LIST_ELEMENTS(bgp
->peer
, node
, nnode
, peer
)) {
327 FOREACH_AFI_SAFI(afi
, safi
) {
328 type
= peer
->addpath_type
[afi
][safi
];
329 if (type
!= BGP_ADDPATH_NONE
) {
330 peer_count
[afi
][safi
][type
] += 1;
331 bgp
->tx_addpath
.total_peercount
[afi
][safi
] += 1;
336 FOREACH_AFI_SAFI(afi
, safi
) {
337 for (type
=0; type
<BGP_ADDPATH_MAX
; type
++) {
338 int old
= bgp
->tx_addpath
.peercount
[afi
][safi
][type
];
339 int new = peer_count
[afi
][safi
][type
];
341 bgp
->tx_addpath
.peercount
[afi
][safi
][type
] = new;
343 if (old
== 0 && new != 0) {
344 bgp_addpath_populate_type(bgp
, afi
, safi
,
346 } else if (old
!= 0 && new == 0) {
347 bgp_addpath_flush_type(bgp
, afi
, safi
, type
);
354 * Change the addpath type assigned to a peer, or peer group. In addition to
355 * adjusting the counts, peer sessions will be reset as needed to make the
356 * change take effect.
358 void bgp_addpath_set_peer_type(struct peer
*peer
, afi_t afi
, safi_t safi
,
359 enum bgp_addpath_strat addpath_type
)
361 struct bgp
*bgp
= peer
->bgp
;
362 enum bgp_addpath_strat old_type
;
363 struct listnode
*node
, *nnode
;
364 struct peer
*tmp_peer
;
365 struct peer_group
*group
;
367 if (safi
== SAFI_LABELED_UNICAST
)
370 old_type
= peer
->addpath_type
[afi
][safi
];
371 if (addpath_type
== old_type
)
374 if (addpath_type
== BGP_ADDPATH_NONE
&& peer
->group
&&
375 !CHECK_FLAG(peer
->sflags
, PEER_STATUS_GROUP
)) {
376 /* A "no" config on a group member inherits group */
377 addpath_type
= peer
->group
->conf
->addpath_type
[afi
][safi
];
380 peer
->addpath_type
[afi
][safi
] = addpath_type
;
382 bgp_addpath_type_changed(bgp
);
384 if (addpath_type
!= BGP_ADDPATH_NONE
) {
385 if (bgp_addpath_dmed_required(addpath_type
)) {
386 if (!CHECK_FLAG(bgp
->flags
,
387 BGP_FLAG_DETERMINISTIC_MED
)) {
389 "%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
392 BGP_FLAG_DETERMINISTIC_MED
);
393 bgp_recalculate_all_bestpaths(bgp
);
398 zlog_info("Resetting peer %s%pBP due to change in addpath config",
399 CHECK_FLAG(peer
->sflags
, PEER_STATUS_GROUP
) ? "group " : "",
402 if (CHECK_FLAG(peer
->sflags
, PEER_STATUS_GROUP
)) {
405 /* group will be null as peer_group_delete calls peer_delete on
406 * group->conf. That peer_delete will eventuallly end up here
407 * if the group was configured to tx addpaths.
410 for (ALL_LIST_ELEMENTS(group
->peer
, node
, nnode
,
412 if (tmp_peer
->addpath_type
[afi
][safi
] ==
414 bgp_addpath_set_peer_type(tmp_peer
,
422 peer_change_action(peer
, afi
, safi
, peer_change_reset
);
428 * Intended to run after bestpath. This function will take TX IDs from paths
429 * that no longer need them, and give them to paths that do. This prevents
430 * best-per-as updates from needing to do a separate withdraw and update just to
431 * swap out which path is sent.
433 void bgp_addpath_update_ids(struct bgp
*bgp
, struct bgp_dest
*bn
, afi_t afi
,
437 struct bgp_path_info
*pi
;
438 struct id_alloc_pool
**pool_ptr
;
440 if (safi
== SAFI_LABELED_UNICAST
)
443 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
444 struct id_alloc
*alloc
=
445 bgp
->tx_addpath
.id_allocators
[afi
][safi
][i
];
446 pool_ptr
= &(bn
->tx_addpath
.free_ids
[i
]);
448 if (bgp
->tx_addpath
.peercount
[afi
][safi
][i
] == 0)
451 /* Free Unused IDs back to the pool.*/
452 for (pi
= bgp_dest_get_bgp_path_info(bn
); pi
; pi
= pi
->next
) {
453 if (pi
->tx_addpath
.addpath_tx_id
[i
] != IDALLOC_INVALID
454 && !bgp_addpath_tx_path(i
, pi
)) {
455 idalloc_free_to_pool(pool_ptr
,
456 pi
->tx_addpath
.addpath_tx_id
[i
]);
457 pi
->tx_addpath
.addpath_tx_id
[i
] =
462 /* Give IDs to paths that need them (pulling from the pool) */
463 for (pi
= bgp_dest_get_bgp_path_info(bn
); pi
; pi
= pi
->next
) {
464 if (pi
->tx_addpath
.addpath_tx_id
[i
] == IDALLOC_INVALID
465 && bgp_addpath_tx_path(i
, pi
)) {
466 pi
->tx_addpath
.addpath_tx_id
[i
] =
467 idalloc_allocate_prefer_pool(
472 /* Free any IDs left in the pool to the main allocator */
473 idalloc_drain_pool(alloc
, pool_ptr
);