// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Addpath TX ID selection, and related utilities
 * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "bgp_addpath.h"
#include "bgp_route.h"

static const struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};

static const struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};

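/*
 * Only strategies below BGP_ADDPATH_MAX (ALL and Best-Per-AS) have entries
 * in strat_names; any other value, including BGP_ADDPATH_NONE, falls back
 * to unknown_names via bgp_addpath_names().
 */
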
/*
 * Returns a structure full of strings associated with an addpath type. Will
 * never return null.
 */
const struct bgp_addpath_strategy_names *
bgp_addpath_names(enum bgp_addpath_strat strat)
{
	if (strat < BGP_ADDPATH_MAX)
		return &(strat_names[strat]);
	else
		return &unknown_names;
}

/*
 * Returns true if any peer is transmitting addpaths for a given afi/safi.
 */
bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
				 safi_t safi)
{
	return d->total_peercount[afi][safi] > 0;
}

/*
 * Initialize the BGP instance level data for addpath.
 */
void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
{
	safi_t safi;
	afi_t afi;
	int i;

	FOREACH_AFI_SAFI (afi, safi) {
		for (i = 0; i < BGP_ADDPATH_MAX; i++) {
			d->id_allocators[afi][safi][i] = NULL;
			d->peercount[afi][safi][i] = 0;
		}
		d->total_peercount[afi][safi] = 0;
	}
}

/*
 * Free up resources associated with BGP route info structures.
 */
void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
				struct bgp_addpath_node_data *nd)
{
	int i;

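	/*
	 * Rather than handing IDs straight back to the allocator, park them
	 * in the owning node's free-ID pool; bgp_addpath_update_ids() and
	 * bgp_addpath_free_node_data() drain that pool to the per-afi/safi
	 * allocator later.
	 */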
	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		if (d->addpath_tx_id[i] != IDALLOC_INVALID)
			idalloc_free_to_pool(&nd->free_ids[i],
					     d->addpath_tx_id[i]);
	}
}

/*
 * Return the addpath ID used to send a particular route, to a particular peer,
 * in a particular AFI/SAFI.
 */
uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
				 struct bgp_addpath_info_data *d)
{
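	/*
	 * Addpath state for labeled-unicast is kept under SAFI_UNICAST (see
	 * bgp_addpath_populate_type()), so remap before indexing.
	 */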
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
		return d->addpath_tx_id[peer->addpath_type[afi][safi]];
	else
		return IDALLOC_INVALID;
}

/*
 * Returns true if the path has an assigned addpath ID for any of the addpath
 * strategies.
 */
bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
{
	int i;

	for (i = 0; i < BGP_ADDPATH_MAX; i++)
		if (d->addpath_tx_id[i] != IDALLOC_INVALID)
			return true;

	return false;
}

/*
 * Releases any IDs associated with the BGP prefix.
 */
void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
				struct bgp_addpath_node_data *nd, afi_t afi,
				safi_t safi)
{
	int i;

	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		idalloc_drain_pool(bd->id_allocators[afi][safi][i],
				   &(nd->free_ids[i]));
	}
}

/*
 * Check to see if the addpath strategy requires deterministic-med (DMED) to
 * be configured to work.
 */
bool bgp_addpath_dmed_required(int strategy)
{
	return strategy == BGP_ADDPATH_BEST_PER_AS;
}

/*
 * Return true if this is a path we should advertise due to a
 * configured addpath-tx knob
 */
bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, struct bgp_path_info *pi)
{
	switch (strat) {
	case BGP_ADDPATH_NONE:
		return false;
	case BGP_ADDPATH_ALL:
		return true;
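	/*
	 * Deterministic-med flags the winner of each same-AS group with
	 * BGP_PATH_DMED_SELECTED; best-per-AS advertises exactly those
	 * paths.
	 */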
	case BGP_ADDPATH_BEST_PER_AS:
		if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
			return true;
		else
			return false;
	case BGP_ADDPATH_MAX:
		return false;
	}

	assert(!"Reached end of function we should never hit");
}

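/*
 * Release every ID held for one addpath type at a single route node: drain
 * the node's free-ID pool, then return each path's assigned ID to the
 * allocator.
 */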
static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
				      enum bgp_addpath_strat addpath_type,
				      struct bgp_dest *dest)
{
	struct bgp_path_info *pi;

	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	idalloc_drain_pool(
		bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
		&(dest->tx_addpath.free_ids[addpath_type]));
	for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
		if (pi->tx_addpath.addpath_tx_id[addpath_type]
		    != IDALLOC_INVALID) {
			idalloc_free(
				bgp->tx_addpath
					.id_allocators[afi][safi][addpath_type],
				pi->tx_addpath.addpath_tx_id[addpath_type]);
			pi->tx_addpath.addpath_tx_id[addpath_type] =
				IDALLOC_INVALID;
		}
	}
}

/*
 * Purge all addpath IDs on a BGP instance associated with the addpath
 * strategy and afi/safi combination. This releases all memory held to track
 * ID numbers for an addpath type that is no longer in use. Since
 * post-bestpath ID processing is skipped for unused types, this is the only
 * chance to free this data.
 */
static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
				   enum bgp_addpath_strat addpath_type)
{
	struct bgp_dest *dest, *ndest;

	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
	     dest = bgp_route_next(dest)) {
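		/*
		 * MPLS VPN RIBs are two-level: the top table is keyed by
		 * route distinguisher and each entry carries a nested prefix
		 * table, so recurse one level before touching paths.
		 */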
		if (safi == SAFI_MPLS_VPN) {
			struct bgp_table *table;

			table = bgp_dest_get_bgp_table_info(dest);
			if (!table)
				continue;

			for (ndest = bgp_table_top(table); ndest;
			     ndest = bgp_route_next(ndest))
				bgp_addpath_flush_type_rn(bgp, afi, safi,
							  addpath_type, ndest);
		} else {
			bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type,
						  dest);
		}
	}

	idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
}

/*
 * Allocate an Addpath ID for the given type on a path, if necessary.
 */
static void bgp_addpath_populate_path(struct id_alloc *allocator,
				      struct bgp_path_info *path,
				      enum bgp_addpath_strat addpath_type)
{
	if (bgp_addpath_tx_path(addpath_type, path)) {
		path->tx_addpath.addpath_tx_id[addpath_type] =
			idalloc_allocate(allocator);
	}
}

/*
 * Compute addpath IDs on a BGP instance associated with the addpath strategy
 * and afi/safi combination. Since we do not waste time computing addpath IDs
 * for unused strategies, the data must be backfilled the first time a peer
 * is configured to use a strategy.
 * In labeled-unicast, addpath allocations SHOULD be done in unicast SAFI.
 */
static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
				      enum bgp_addpath_strat addpath_type)
{
	struct bgp_dest *dest, *ndest;
	char buf[200];
	struct id_alloc *allocator;

	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
		 bgp_addpath_names(addpath_type)->config_name, (int)afi,
		 (int)safi);
	zlog_info("Computing addpath IDs for addpath type %s",
		  bgp_addpath_names(addpath_type)->human_name);

	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
		idalloc_new(buf);

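	/*
	 * Reserve the ID used when advertising the default-originate route
	 * (per BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE) so the allocator
	 * never hands it out to a regular path.
	 */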
	idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
			BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);

	allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];

	for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
	     dest = bgp_route_next(dest)) {
		struct bgp_path_info *bi;

		if (safi == SAFI_MPLS_VPN) {
			struct bgp_table *table;

			table = bgp_dest_get_bgp_table_info(dest);
			if (!table)
				continue;

			for (ndest = bgp_table_top(table); ndest;
			     ndest = bgp_route_next(ndest))
				for (bi = bgp_dest_get_bgp_path_info(ndest);
				     bi; bi = bi->next)
					bgp_addpath_populate_path(allocator, bi,
								  addpath_type);
		} else {
			for (bi = bgp_dest_get_bgp_path_info(dest); bi;
			     bi = bi->next)
				bgp_addpath_populate_path(allocator, bi,
							  addpath_type);
		}
	}
}

/*
 * Handle updates to a peer or group's addpath strategy. If, after adjusting
 * counts, an addpath strategy is in use for the first time or no longer in
 * use, the IDs for that strategy will be populated or flushed.
 */
void bgp_addpath_type_changed(struct bgp *bgp)
{
	afi_t afi;
	safi_t safi;
	struct listnode *node, *nnode;
	struct peer *peer;
	int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
	enum bgp_addpath_strat type;

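	/*
	 * Recount from scratch rather than adjusting incrementally: zero the
	 * counters, tally every peer's configured type, then diff against
	 * the stored per-type counts to catch 0 -> nonzero (populate) and
	 * nonzero -> 0 (flush) transitions.
	 */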
	FOREACH_AFI_SAFI (afi, safi) {
		for (type = 0; type < BGP_ADDPATH_MAX; type++) {
			peer_count[afi][safi][type] = 0;
		}
		bgp->tx_addpath.total_peercount[afi][safi] = 0;
	}

	for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
		FOREACH_AFI_SAFI (afi, safi) {
			type = peer->addpath_type[afi][safi];
			if (type != BGP_ADDPATH_NONE) {
				peer_count[afi][safi][type] += 1;
				bgp->tx_addpath.total_peercount[afi][safi] += 1;
			}
		}
	}

	FOREACH_AFI_SAFI (afi, safi) {
		for (type = 0; type < BGP_ADDPATH_MAX; type++) {
			int old = bgp->tx_addpath.peercount[afi][safi][type];
			int new = peer_count[afi][safi][type];

			bgp->tx_addpath.peercount[afi][safi][type] = new;

			if (old == 0 && new != 0) {
				bgp_addpath_populate_type(bgp, afi, safi,
							  type);
			} else if (old != 0 && new == 0) {
				bgp_addpath_flush_type(bgp, afi, safi, type);
			}
		}
	}
}

/*
 * Change the addpath type assigned to a peer, or peer group. In addition to
 * adjusting the counts, peer sessions will be reset as needed to make the
 * change take effect.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type)
{
	struct bgp *bgp = peer->bgp;
	enum bgp_addpath_strat old_type;
	struct listnode *node, *nnode;
	struct peer *tmp_peer;
	struct peer_group *group;

	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	old_type = peer->addpath_type[afi][safi];
	if (addpath_type == old_type)
		return;

	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		/* A "no" config on a group member inherits group */
		addpath_type = peer->group->conf->addpath_type[afi][safi];
	}

	peer->addpath_type[afi][safi] = addpath_type;

	bgp_addpath_type_changed(bgp);

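	/*
	 * Best-per-AS depends on deterministic-med marking the per-AS
	 * winners (BGP_PATH_DMED_SELECTED), so turn deterministic-med on and
	 * rerun bestpath if it was not already enabled.
	 */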
	if (addpath_type != BGP_ADDPATH_NONE) {
		if (bgp_addpath_dmed_required(addpath_type)) {
			if (!CHECK_FLAG(bgp->flags,
					BGP_FLAG_DETERMINISTIC_MED)) {
				zlog_warn(
					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
					peer->host);
				SET_FLAG(bgp->flags,
					 BGP_FLAG_DETERMINISTIC_MED);
				bgp_recalculate_all_bestpaths(bgp);
			}
		}
	}

	zlog_info("Resetting peer %s%pBP due to change in addpath config",
		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
		  peer);

	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		group = peer->group;

		/* group can be NULL because peer_group_delete calls
		 * peer_delete on group->conf. That peer_delete will
		 * eventually end up here if the group was configured to tx
		 * addpaths.
		 */
		if (group != NULL) {
			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
					       tmp_peer)) {
				if (tmp_peer->addpath_type[afi][safi] ==
				    old_type) {
					bgp_addpath_set_peer_type(tmp_peer,
								  afi, safi,
								  addpath_type);
				}
			}
		}
	} else {
		peer_change_action(peer, afi, safi, peer_change_reset);
	}
}

/*
 * Intended to run after bestpath. This function will take TX IDs from paths
 * that no longer need them, and give them to paths that do. This prevents
 * best-per-as updates from needing to do a separate withdraw and update just to
 * swap out which path is sent.
 */
void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *bn, afi_t afi,
			    safi_t safi)
{
	int i;
	struct bgp_path_info *pi;
	struct id_alloc_pool **pool_ptr;

	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

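	/*
	 * For each strategy, IDs freed by paths that no longer qualify go
	 * into the node's pool first, and newly qualifying paths allocate
	 * from that pool before the main allocator. A replacement path thus
	 * tends to reuse the ID its predecessor gave up, turning a would-be
	 * withdraw-plus-advertise into a single implicit-replace update.
	 */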
	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		struct id_alloc *alloc =
			bgp->tx_addpath.id_allocators[afi][safi][i];
		pool_ptr = &(bn->tx_addpath.free_ids[i]);

		if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
			continue;

		/* Free unused IDs back to the pool. */
		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
			    && !bgp_addpath_tx_path(i, pi)) {
				idalloc_free_to_pool(
					pool_ptr,
					pi->tx_addpath.addpath_tx_id[i]);
				pi->tx_addpath.addpath_tx_id[i] =
					IDALLOC_INVALID;
			}
		}

		/* Give IDs to paths that need them (pulling from the pool) */
		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
			    && bgp_addpath_tx_path(i, pi)) {
				pi->tx_addpath.addpath_tx_id[i] =
					idalloc_allocate_prefer_pool(
						alloc, pool_ptr);
			}
		}

		/* Free any IDs left in the pool to the main allocator */
		idalloc_drain_pool(alloc, pool_ptr);
	}
}