/*
 * bgpd/bgp_addpath.c (from mirror_frr.git; commit subject:
 * "bgpd: Add missing enum's to case statement")
 */
1 /*
2 * Addpath TX ID selection, and related utilities
3 * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "bgp_addpath.h"
25 #include "bgp_route.h"
26
/*
 * Display/config strings for each addpath strategy, indexed by
 * enum bgp_addpath_strat.  Strategies at or above BGP_ADDPATH_MAX fall
 * back to unknown_names via bgp_addpath_names().
 */
static const struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};
43
/*
 * Fallback name set returned by bgp_addpath_names() for any strategy
 * value not covered by strat_names[].
 */
static const struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};
51
52 /*
53 * Returns a structure full of strings associated with an addpath type. Will
54 * never return null.
55 */
56 const struct bgp_addpath_strategy_names *
57 bgp_addpath_names(enum bgp_addpath_strat strat)
58 {
59 if (strat < BGP_ADDPATH_MAX)
60 return &(strat_names[strat]);
61 else
62 return &unknown_names;
63 };
64
65 /*
66 * Returns if any peer is transmitting addpaths for a given afi/safi.
67 */
68 bool bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
69 safi_t safi)
70 {
71 return d->total_peercount[afi][safi] > 0;
72 }
73
74 /*
75 * Initialize the BGP instance level data for addpath.
76 */
77 void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
78 {
79 safi_t safi;
80 afi_t afi;
81 int i;
82
83 FOREACH_AFI_SAFI (afi, safi) {
84 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
85 d->id_allocators[afi][safi][i] = NULL;
86 d->peercount[afi][safi][i] = 0;
87 }
88 d->total_peercount[afi][safi] = 0;
89 }
90 }
91
92 /*
93 * Free up resources associated with BGP route info structures.
94 */
95 void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
96 struct bgp_addpath_node_data *nd)
97 {
98 int i;
99
100 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
101 if (d->addpath_tx_id[i] != IDALLOC_INVALID)
102 idalloc_free_to_pool(&nd->free_ids[i],
103 d->addpath_tx_id[i]);
104 }
105 }
106
107 /*
108 * Return the addpath ID used to send a particular route, to a particular peer,
109 * in a particular AFI/SAFI.
110 */
111 uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
112 struct bgp_addpath_info_data *d)
113 {
114 if (safi == SAFI_LABELED_UNICAST)
115 safi = SAFI_UNICAST;
116
117 if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
118 return d->addpath_tx_id[peer->addpath_type[afi][safi]];
119 else
120 return IDALLOC_INVALID;
121 }
122
123 /*
124 * Returns true if the path has an assigned addpath ID for any of the addpath
125 * strategies.
126 */
127 bool bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
128 {
129 int i;
130
131 for (i = 0; i < BGP_ADDPATH_MAX; i++)
132 if (d->addpath_tx_id[i] != 0)
133 return true;
134
135 return false;
136 }
137
/*
 * Releases any ID's associated with the BGP prefix: each strategy's
 * per-node free-ID pool is drained back into the instance-wide allocator
 * so the IDs can be reused elsewhere.
 */
void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
				struct bgp_addpath_node_data *nd, afi_t afi,
				safi_t safi)
{
	int i;

	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		idalloc_drain_pool(bd->id_allocators[afi][safi][i],
				   &(nd->free_ids[i]));
	}
}
152
153 /*
154 * Check to see if the addpath strategy requires DMED to be configured to work.
155 */
156 bool bgp_addpath_dmed_required(int strategy)
157 {
158 return strategy == BGP_ADDPATH_BEST_PER_AS;
159 }
160
161 /*
162 * Return true if this is a path we should advertise due to a
163 * configured addpath-tx knob
164 */
165 bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, struct bgp_path_info *pi)
166 {
167 switch (strat) {
168 case BGP_ADDPATH_NONE:
169 return false;
170 case BGP_ADDPATH_ALL:
171 return true;
172 case BGP_ADDPATH_BEST_PER_AS:
173 if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
174 return true;
175 else
176 return false;
177 case BGP_ADDPATH_MAX:
178 return false;
179 }
180
181 assert(!"Reached end of function we should never hit");
182 }
183
/*
 * Release every addpath TX ID of one strategy held by a single RIB node:
 * first drain the node's pool of already-freed IDs into the instance
 * allocator, then free and invalidate the ID (if any) attached to each
 * path at the node.
 */
static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
				      enum bgp_addpath_strat addpath_type,
				      struct bgp_dest *dest)
{
	struct bgp_path_info *pi;

	/* Labeled-unicast allocations live under the unicast SAFI. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	idalloc_drain_pool(
		bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
		&(dest->tx_addpath.free_ids[addpath_type]));
	for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
		if (pi->tx_addpath.addpath_tx_id[addpath_type]
		    != IDALLOC_INVALID) {
			idalloc_free(
				bgp->tx_addpath
					.id_allocators[afi][safi][addpath_type],
				pi->tx_addpath.addpath_tx_id[addpath_type]);
			pi->tx_addpath.addpath_tx_id[addpath_type] =
				IDALLOC_INVALID;
		}
	}
}
208
/*
 * Purge all addpath ID's on a BGP instance associated with the addpath
 * strategy, and afi/safi combination. This lets us let go of all memory held to
 * track ID numbers associated with an addpath type not in use. Since
 * post-bestpath ID processing is skipped for types not used, this is the only
 * chance to free this data.
 */
static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
				   enum bgp_addpath_strat addpath_type)
{
	struct bgp_dest *dest, *ndest;

	/* Labeled-unicast IDs are tracked under the unicast SAFI. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
	     dest = bgp_route_next(dest)) {
		/* MPLS-VPN nests a per-RD table under each top-level dest;
		 * flush each inner node individually. */
		if (safi == SAFI_MPLS_VPN) {
			struct bgp_table *table;

			table = bgp_dest_get_bgp_table_info(dest);
			if (!table)
				continue;

			for (ndest = bgp_table_top(table); ndest;
			     ndest = bgp_route_next(ndest))
				bgp_addpath_flush_type_rn(bgp, afi, safi,
							  addpath_type, ndest);
		} else {
			bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type,
						  dest);
		}
	}

	/* All IDs are back in the allocator; tear it down entirely. */
	idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
	bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
}
246
247 /*
248 * Allocate an Addpath ID for the given type on a path, if necessary.
249 */
250 static void bgp_addpath_populate_path(struct id_alloc *allocator,
251 struct bgp_path_info *path,
252 enum bgp_addpath_strat addpath_type)
253 {
254 if (bgp_addpath_tx_path(addpath_type, path)) {
255 path->tx_addpath.addpath_tx_id[addpath_type] =
256 idalloc_allocate(allocator);
257 }
258 }
259
260 /*
261 * Compute addpath ID's on a BGP instance associated with the addpath strategy,
262 * and afi/safi combination. Since we won't waste the time computing addpath IDs
263 * for unused strategies, the first time a peer is configured to use a strategy,
264 * we have to backfill the data.
265 * In labeled-unicast, addpath allocations SHOULD be done in unicast SAFI.
266 */
267 static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
268 enum bgp_addpath_strat addpath_type)
269 {
270 struct bgp_dest *dest, *ndest;
271 char buf[200];
272 struct id_alloc *allocator;
273
274 if (safi == SAFI_LABELED_UNICAST)
275 safi = SAFI_UNICAST;
276
277 snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
278 bgp_addpath_names(addpath_type)->config_name, (int)afi,
279 (int)safi);
280 buf[sizeof(buf) - 1] = '\0';
281 zlog_info("Computing addpath IDs for addpath type %s",
282 bgp_addpath_names(addpath_type)->human_name);
283
284 bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
285 idalloc_new(buf);
286
287 idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
288 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
289
290 allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];
291
292 for (dest = bgp_table_top(bgp->rib[afi][safi]); dest;
293 dest = bgp_route_next(dest)) {
294 struct bgp_path_info *bi;
295
296 if (safi == SAFI_MPLS_VPN) {
297 struct bgp_table *table;
298
299 table = bgp_dest_get_bgp_table_info(dest);
300 if (!table)
301 continue;
302
303 for (ndest = bgp_table_top(table); ndest;
304 ndest = bgp_route_next(ndest))
305 for (bi = bgp_dest_get_bgp_path_info(ndest); bi;
306 bi = bi->next)
307 bgp_addpath_populate_path(allocator, bi,
308 addpath_type);
309 } else {
310 for (bi = bgp_dest_get_bgp_path_info(dest); bi;
311 bi = bi->next)
312 bgp_addpath_populate_path(allocator, bi,
313 addpath_type);
314 }
315 }
316 }
317
318 /*
319 * Handle updates to a peer or group's addpath strategy. If after adjusting
320 * counts a addpath strategy is in use for the first time, or no longer in use,
321 * the IDs for that strategy will be populated or flushed.
322 */
323 void bgp_addpath_type_changed(struct bgp *bgp)
324 {
325 afi_t afi;
326 safi_t safi;
327 struct listnode *node, *nnode;
328 struct peer *peer;
329 int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
330 enum bgp_addpath_strat type;
331
332 FOREACH_AFI_SAFI(afi, safi) {
333 for (type=0; type<BGP_ADDPATH_MAX; type++) {
334 peer_count[afi][safi][type] = 0;
335 }
336 bgp->tx_addpath.total_peercount[afi][safi] = 0;
337 }
338
339 for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
340 FOREACH_AFI_SAFI(afi, safi) {
341 type = peer->addpath_type[afi][safi];
342 if (type != BGP_ADDPATH_NONE) {
343 peer_count[afi][safi][type] += 1;
344 bgp->tx_addpath.total_peercount[afi][safi] += 1;
345 }
346 }
347 }
348
349 FOREACH_AFI_SAFI(afi, safi) {
350 for (type=0; type<BGP_ADDPATH_MAX; type++) {
351 int old = bgp->tx_addpath.peercount[afi][safi][type];
352 int new = peer_count[afi][safi][type];
353
354 bgp->tx_addpath.peercount[afi][safi][type] = new;
355
356 if (old == 0 && new != 0) {
357 bgp_addpath_populate_type(bgp, afi, safi,
358 type);
359 } else if (old != 0 && new == 0) {
360 bgp_addpath_flush_type(bgp, afi, safi, type);
361 }
362 }
363 }
364 }
365
/*
 * Change the addpath type assigned to a peer, or peer group. In addition to
 * adjusting the counts, peer sessions will be reset as needed to make the
 * change take effect.  When called on a group's conf peer, the change is
 * recursively pushed to every member still carrying the old type.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type)
{
	struct bgp *bgp = peer->bgp;
	enum bgp_addpath_strat old_type;
	struct listnode *node, *nnode;
	struct peer *tmp_peer;
	struct peer_group *group;

	/* Labeled-unicast config is stored under the unicast SAFI. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	old_type = peer->addpath_type[afi][safi];
	if (addpath_type == old_type)
		return;

	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		/* A "no" config on a group member inherits group */
		addpath_type = peer->group->conf->addpath_type[afi][safi];
	}

	peer->addpath_type[afi][safi] = addpath_type;

	/* Recount strategy usage; may populate or flush ID allocators. */
	bgp_addpath_type_changed(bgp);

	if (addpath_type != BGP_ADDPATH_NONE) {
		if (bgp_addpath_dmed_required(addpath_type)) {
			/* best-per-AS only works with deterministic-MED;
			 * force it on and recompute bestpaths. */
			if (!CHECK_FLAG(bgp->flags,
					BGP_FLAG_DETERMINISTIC_MED)) {
				zlog_warn(
					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
					peer->host);
				SET_FLAG(bgp->flags,
					 BGP_FLAG_DETERMINISTIC_MED);
				bgp_recalculate_all_bestpaths(bgp);
			}
		}
	}

	zlog_info("Resetting peer %s%pBP due to change in addpath config",
		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
		  peer);

	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		group = peer->group;

		/* group will be null as peer_group_delete calls peer_delete on
		 * group->conf. That peer_delete will eventuallly end up here
		 * if the group was configured to tx addpaths.
		 */
		if (group != NULL) {
			/* Recurse into members that still match the old type
			 * so they pick up (and get reset for) the change. */
			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
					       tmp_peer)) {
				if (tmp_peer->addpath_type[afi][safi] ==
				    old_type) {
					bgp_addpath_set_peer_type(tmp_peer,
								  afi,
								  safi,
								  addpath_type);
				}
			}
		}
	} else {
		/* Ordinary peer: reset the session so the change is
		 * renegotiated with the neighbor. */
		peer_change_action(peer, afi, safi, peer_change_reset);
	}

}
439
/*
 * Intended to run after bestpath. This function will take TX IDs from paths
 * that no longer need them, and give them to paths that do. This prevents
 * best-per-as updates from needing to do a separate withdraw and update just to
 * swap out which path is sent.
 */
void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *bn, afi_t afi,
			    safi_t safi)
{
	int i;
	struct bgp_path_info *pi;
	struct id_alloc_pool **pool_ptr;

	/* Labeled-unicast IDs live under the unicast SAFI. */
	if (safi == SAFI_LABELED_UNICAST)
		safi = SAFI_UNICAST;

	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		struct id_alloc *alloc =
			bgp->tx_addpath.id_allocators[afi][safi][i];
		pool_ptr = &(bn->tx_addpath.free_ids[i]);

		/* Skip strategies no peer is using; their allocators may
		 * not even exist (see bgp_addpath_flush_type). */
		if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
			continue;

		/* Free Unused IDs back to the pool.*/
		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
			    && !bgp_addpath_tx_path(i, pi)) {
				idalloc_free_to_pool(pool_ptr,
					pi->tx_addpath.addpath_tx_id[i]);
				pi->tx_addpath.addpath_tx_id[i] =
					IDALLOC_INVALID;
			}
		}

		/* Give IDs to paths that need them (pulling from the pool).
		 * Freeing first maximizes ID reuse within this node. */
		for (pi = bgp_dest_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
			    && bgp_addpath_tx_path(i, pi)) {
				pi->tx_addpath.addpath_tx_id[i] =
					idalloc_allocate_prefer_pool(
						alloc, pool_ptr);
			}
		}

		/* Free any IDs left in the pool to the main allocator */
		idalloc_drain_pool(alloc, pool_ptr);
	}
}