/*
 * Addpath TX ID selection, and related utilities
 * Copyright (C) 2018  Amazon.com, Inc. or its affiliates
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
20 #include "bgp_addpath.h"
21 #include "bgp_route.h"
23 static struct bgp_addpath_strategy_names strat_names
[BGP_ADDPATH_MAX
] = {
25 .config_name
= "addpath-tx-all-paths",
27 .human_description
= "Advertise all paths via addpath",
28 .type_json_name
= "addpathTxAllPaths",
29 .id_json_name
= "addpathTxIdAll"
32 .config_name
= "addpath-tx-bestpath-per-AS",
33 .human_name
= "Best-Per-AS",
34 .human_description
= "Advertise bestpath per AS via addpath",
35 .type_json_name
= "addpathTxBestpathPerAS",
36 .id_json_name
= "addpathTxIdBestPerAS"
40 static struct bgp_addpath_strategy_names unknown_names
= {
41 .config_name
= "addpath-tx-unknown",
42 .human_name
= "Unknown-Addpath-Strategy",
43 .human_description
= "Unknown Addpath Strategy",
44 .type_json_name
= "addpathTxUnknown",
45 .id_json_name
= "addpathTxIdUnknown"
49 * Returns a structure full of strings associated with an addpath type. Will
52 struct bgp_addpath_strategy_names
*
53 bgp_addpath_names(enum bgp_addpath_strat strat
)
55 if (strat
< BGP_ADDPATH_MAX
)
56 return &(strat_names
[strat
]);
58 return &unknown_names
;
62 * Returns if any peer is transmitting addpaths for a given afi/safi.
64 int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data
*d
, afi_t afi
,
67 return d
->total_peercount
[afi
][safi
] > 0;
71 * Initialize the BGP instance level data for addpath.
73 void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data
*d
)
79 for (afi
= AFI_IP
; afi
< AFI_MAX
; afi
++) {
80 for (safi
= SAFI_UNICAST
; safi
< SAFI_MAX
; safi
++) {
81 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
82 d
->id_allocators
[afi
][safi
][i
] = NULL
;
83 d
->peercount
[afi
][safi
][i
] = 0;
85 d
->total_peercount
[afi
][safi
] = 0;
91 * Free up resources associated with BGP route info structures.
93 void bgp_addpath_free_info_data(struct bgp_addpath_info_data
*d
,
94 struct bgp_addpath_node_data
*nd
)
98 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
99 if (d
->addpath_tx_id
[i
] != IDALLOC_INVALID
)
100 idalloc_free_to_pool(&nd
->free_ids
[i
],
101 d
->addpath_tx_id
[i
]);
106 * Return the addpath ID used to send a particular route, to a particular peer,
107 * in a particular AFI/SAFI.
109 uint32_t bgp_addpath_id_for_peer(struct peer
*peer
, afi_t afi
, safi_t safi
,
110 struct bgp_addpath_info_data
*d
)
112 if (peer
->addpath_type
[afi
][safi
] < BGP_ADDPATH_MAX
)
113 return d
->addpath_tx_id
[peer
->addpath_type
[afi
][safi
]];
115 return IDALLOC_INVALID
;
119 * Returns true if the path has an assigned addpath ID for any of the addpath
122 int bgp_addpath_info_has_ids(struct bgp_addpath_info_data
*d
)
126 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++)
127 if (d
->addpath_tx_id
[i
] != 0)
134 * Releases any ID's associated with the BGP prefix.
136 void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data
*bd
,
137 struct bgp_addpath_node_data
*nd
, afi_t afi
,
142 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
143 idalloc_drain_pool(bd
->id_allocators
[afi
][safi
][i
],
149 * Check to see if the addpath strategy requires DMED to be configured to work.
151 int bgp_addpath_dmed_required(int strategy
)
153 return strategy
== BGP_ADDPATH_BEST_PER_AS
;
157 * Return true if this is a path we should advertise due to a
158 * configured addpath-tx knob
160 int bgp_addpath_tx_path(enum bgp_addpath_strat strat
,
161 struct bgp_path_info
*pi
)
164 case BGP_ADDPATH_NONE
:
166 case BGP_ADDPATH_ALL
:
168 case BGP_ADDPATH_BEST_PER_AS
:
169 if (CHECK_FLAG(pi
->flags
, BGP_PATH_DMED_SELECTED
))
179 * Purge all addpath ID's on a BGP instance associated with the addpath
180 * strategy, and afi/safi combination. This lets us let go of all memory held to
181 * track ID numbers associated with an addpath type not in use. Since
182 * post-bestpath ID processing is skipped for types not used, this is the only
183 * chance to free this data.
185 static void bgp_addpath_flush_type(struct bgp
*bgp
, afi_t afi
, safi_t safi
,
186 enum bgp_addpath_strat addpath_type
)
189 struct bgp_path_info
*pi
;
191 for (rn
= bgp_table_top(bgp
->rib
[afi
][safi
]); rn
;
192 rn
= bgp_route_next(rn
)) {
194 bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
],
195 &(rn
->tx_addpath
.free_ids
[addpath_type
]));
196 for (pi
= rn
->info
; pi
; pi
= pi
->next
) {
197 if (pi
->tx_addpath
.addpath_tx_id
[addpath_type
]
198 != IDALLOC_INVALID
) {
201 .id_allocators
[afi
][safi
]
204 .addpath_tx_id
[addpath_type
]);
205 pi
->tx_addpath
.addpath_tx_id
[addpath_type
] =
211 idalloc_destroy(bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
]);
212 bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
] = NULL
;
216 * Allocate an Addpath ID for the given type on a path, if necessary.
218 static void bgp_addpath_populate_path(struct id_alloc
*allocator
,
219 struct bgp_path_info
*path
,
220 enum bgp_addpath_strat addpath_type
)
222 if (bgp_addpath_tx_path(addpath_type
, path
)) {
223 path
->tx_addpath
.addpath_tx_id
[addpath_type
] =
224 idalloc_allocate(allocator
);
229 * Compute addpath ID's on a BGP instance associated with the addpath strategy,
230 * and afi/safi combination. Since we won't waste the time computing addpath IDs
231 * for unused strategies, the first time a peer is configured to use a strategy,
232 * we have to backfill the data.
234 static void bgp_addpath_populate_type(struct bgp
*bgp
, afi_t afi
, safi_t safi
,
235 enum bgp_addpath_strat addpath_type
)
238 struct bgp_path_info
*bi
;
240 struct id_alloc
*allocator
;
242 snprintf(buf
, sizeof(buf
), "Addpath ID Allocator %s:%d/%d",
243 bgp_addpath_names(addpath_type
)->config_name
, (int)afi
,
245 buf
[sizeof(buf
) - 1] = '\0';
246 zlog_info("Computing addpath IDs for addpath type %s",
247 bgp_addpath_names(addpath_type
)->human_name
);
249 bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
] =
252 idalloc_reserve(bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
],
253 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE
);
255 allocator
= bgp
->tx_addpath
.id_allocators
[afi
][safi
][addpath_type
];
257 for (rn
= bgp_table_top(bgp
->rib
[afi
][safi
]); rn
;
258 rn
= bgp_route_next(rn
))
259 for (bi
= rn
->info
; bi
; bi
= bi
->next
)
260 bgp_addpath_populate_path(allocator
, bi
, addpath_type
);
264 * Handle updates to a peer or group's addpath strategy. If after adjusting
265 * counts a addpath strategy is in use for the first time, or no longer in use,
266 * the IDs for that strategy will be populated or flushed.
268 void bgp_addpath_type_changed(struct bgp
*bgp
)
272 struct listnode
*node
, *nnode
;
274 int peer_count
[AFI_MAX
][SAFI_MAX
][BGP_ADDPATH_MAX
];
275 enum bgp_addpath_strat type
;
277 FOREACH_AFI_SAFI(afi
, safi
) {
278 for (type
=0; type
<BGP_ADDPATH_MAX
; type
++) {
279 peer_count
[afi
][safi
][type
] = 0;
283 for (ALL_LIST_ELEMENTS(bgp
->peer
, node
, nnode
, peer
)) {
284 FOREACH_AFI_SAFI(afi
, safi
) {
285 type
= peer
->addpath_type
[afi
][safi
];
286 if (type
!= BGP_ADDPATH_NONE
) {
287 peer_count
[afi
][safi
][type
] += 1;
292 FOREACH_AFI_SAFI(afi
, safi
) {
293 for (type
=0; type
<BGP_ADDPATH_MAX
; type
++) {
294 int old
= bgp
->tx_addpath
.peercount
[afi
][safi
][type
];
295 int new = peer_count
[afi
][safi
][type
];
297 bgp
->tx_addpath
.peercount
[afi
][safi
][type
] = new;
299 if (old
== 0 && new != 0) {
300 bgp_addpath_populate_type(bgp
, afi
, safi
,
302 } else if (old
!= 0 && new == 0) {
303 bgp_addpath_flush_type(bgp
, afi
, safi
, type
);
310 * Change the addpath type assigned to a peer, or peer group. In addition to
311 * adjusting the counts, peer sessions will be reset as needed to make the
312 * change take effect.
314 void bgp_addpath_set_peer_type(struct peer
*peer
, afi_t afi
, safi_t safi
,
315 enum bgp_addpath_strat addpath_type
)
317 struct bgp
*bgp
= peer
->bgp
;
318 enum bgp_addpath_strat old_type
= peer
->addpath_type
[afi
][safi
];
319 struct listnode
*node
, *nnode
;
320 struct peer
*tmp_peer
;
321 struct peer_group
*group
;
323 if (addpath_type
== old_type
)
326 if (addpath_type
== BGP_ADDPATH_NONE
&& peer
->group
&&
327 !CHECK_FLAG(peer
->sflags
, PEER_STATUS_GROUP
)) {
328 /* A "no" config on a group member inherits group */
329 addpath_type
= peer
->group
->conf
->addpath_type
[afi
][safi
];
332 peer
->addpath_type
[afi
][safi
] = addpath_type
;
334 bgp_addpath_type_changed(bgp
);
336 if (addpath_type
!= BGP_ADDPATH_NONE
) {
337 if (bgp_addpath_dmed_required(addpath_type
)) {
338 if (!bgp_flag_check(bgp
, BGP_FLAG_DETERMINISTIC_MED
)) {
340 "%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
342 bgp_flag_set(bgp
, BGP_FLAG_DETERMINISTIC_MED
);
343 bgp_recalculate_all_bestpaths(bgp
);
348 zlog_info("Resetting peer %s%s due to change in addpath config\n",
349 CHECK_FLAG(peer
->sflags
, PEER_STATUS_GROUP
) ? "group " : "",
352 if (CHECK_FLAG(peer
->sflags
, PEER_STATUS_GROUP
)) {
355 /* group will be null as peer_group_delete calls peer_delete on
356 * group->conf. That peer_delete will eventuallly end up here
357 * if the group was configured to tx addpaths.
360 for (ALL_LIST_ELEMENTS(group
->peer
, node
, nnode
,
362 if (tmp_peer
->addpath_type
[afi
][safi
] ==
364 bgp_addpath_set_peer_type(tmp_peer
,
372 peer_change_action(peer
, afi
, safi
, peer_change_reset
);
378 * Intended to run after bestpath. This function will take TX IDs from paths
379 * that no longer need them, and give them to paths that do. This prevents
380 * best-per-as updates from needing to do a separate withdraw and update just to
381 * swap out which path is sent.
383 void bgp_addpath_update_ids(struct bgp
*bgp
, struct bgp_node
*bn
, afi_t afi
,
387 struct bgp_path_info
*pi
;
388 struct id_alloc_pool
**pool_ptr
;
390 for (i
= 0; i
< BGP_ADDPATH_MAX
; i
++) {
391 struct id_alloc
*alloc
=
392 bgp
->tx_addpath
.id_allocators
[afi
][safi
][i
];
393 pool_ptr
= &(bn
->tx_addpath
.free_ids
[i
]);
395 if (bgp
->tx_addpath
.peercount
[afi
][safi
][i
] == 0)
398 /* Free Unused IDs back to the pool.*/
399 for (pi
= bn
->info
; pi
; pi
= pi
->next
) {
400 if (pi
->tx_addpath
.addpath_tx_id
[i
] != IDALLOC_INVALID
401 && !bgp_addpath_tx_path(i
, pi
)) {
402 idalloc_free_to_pool(pool_ptr
,
403 pi
->tx_addpath
.addpath_tx_id
[i
]);
404 pi
->tx_addpath
.addpath_tx_id
[i
] =
409 /* Give IDs to paths that need them (pulling from the pool) */
410 for (pi
= bn
->info
; pi
; pi
= pi
->next
) {
411 if (pi
->tx_addpath
.addpath_tx_id
[i
] == IDALLOC_INVALID
412 && bgp_addpath_tx_path(i
, pi
)) {
413 pi
->tx_addpath
.addpath_tx_id
[i
] =
414 idalloc_allocate_prefer_pool(
419 /* Free any IDs left in the pool to the main allocator */
420 idalloc_drain_pool(alloc
, pool_ptr
);