]> git.proxmox.com Git - mirror_frr.git/blame - bgpd/bgp_addpath.c
yang: change EIGRP authentication enum name
[mirror_frr.git] / bgpd / bgp_addpath.c
CommitLineData
dcc68b5e
MS
1/*
2 * Addpath TX ID selection, and related utilities
3 * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
2618a52e
DL
20#ifdef HAVE_CONFIG_H
21#include "config.h"
22#endif
23
dcc68b5e
MS
24#include "bgp_addpath.h"
25#include "bgp_route.h"
26
/*
 * Strings describing each addpath strategy, indexed by
 * enum bgp_addpath_strat.  Only the real TX strategies appear here;
 * any value >= BGP_ADDPATH_MAX (e.g. no strategy configured) is mapped
 * to unknown_names by bgp_addpath_names() below.
 */
static struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};
43
/*
 * Fallback strings returned by bgp_addpath_names() for any strategy value
 * outside the strat_names table, so callers never receive NULL.
 */
static struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};
51
52/*
53 * Returns a structure full of strings associated with an addpath type. Will
54 * never return null.
55 */
56struct bgp_addpath_strategy_names *
57bgp_addpath_names(enum bgp_addpath_strat strat)
58{
59 if (strat < BGP_ADDPATH_MAX)
60 return &(strat_names[strat]);
61 else
62 return &unknown_names;
63};
64
65/*
66 * Returns if any peer is transmitting addpaths for a given afi/safi.
67 */
68int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
69 safi_t safi)
70{
71 return d->total_peercount[afi][safi] > 0;
72}
73
74/*
75 * Initialize the BGP instance level data for addpath.
76 */
77void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
78{
79 safi_t safi;
80 afi_t afi;
81 int i;
82
83 for (afi = AFI_IP; afi < AFI_MAX; afi++) {
84 for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
85 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
86 d->id_allocators[afi][safi][i] = NULL;
87 d->peercount[afi][safi][i] = 0;
88 }
89 d->total_peercount[afi][safi] = 0;
90 }
91 }
92}
93
94/*
95 * Free up resources associated with BGP route info structures.
96 */
97void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
98 struct bgp_addpath_node_data *nd)
99{
100 int i;
101
102 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
103 if (d->addpath_tx_id[i] != IDALLOC_INVALID)
104 idalloc_free_to_pool(&nd->free_ids[i],
105 d->addpath_tx_id[i]);
106 }
107}
108
109/*
110 * Return the addpath ID used to send a particular route, to a particular peer,
111 * in a particular AFI/SAFI.
112 */
113uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
114 struct bgp_addpath_info_data *d)
115{
116 if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
117 return d->addpath_tx_id[peer->addpath_type[afi][safi]];
118 else
119 return IDALLOC_INVALID;
120}
121
122/*
123 * Returns true if the path has an assigned addpath ID for any of the addpath
124 * strategies.
125 */
126int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
127{
128 int i;
129
130 for (i = 0; i < BGP_ADDPATH_MAX; i++)
131 if (d->addpath_tx_id[i] != 0)
132 return 1;
133
134 return 0;
135}
136
137/*
138 * Releases any ID's associated with the BGP prefix.
139 */
140void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
141 struct bgp_addpath_node_data *nd, afi_t afi,
142 safi_t safi)
143{
144 int i;
145
146 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
147 idalloc_drain_pool(bd->id_allocators[afi][safi][i],
148 &(nd->free_ids[i]));
149 }
150}
151
152/*
153 * Check to see if the addpath strategy requires DMED to be configured to work.
154 */
155int bgp_addpath_dmed_required(int strategy)
156{
157 return strategy == BGP_ADDPATH_BEST_PER_AS;
158}
159
160/*
161 * Return true if this is a path we should advertise due to a
162 * configured addpath-tx knob
163 */
164int bgp_addpath_tx_path(enum bgp_addpath_strat strat,
165 struct bgp_path_info *pi)
166{
167 switch (strat) {
168 case BGP_ADDPATH_NONE:
169 return 0;
170 case BGP_ADDPATH_ALL:
171 return 1;
172 case BGP_ADDPATH_BEST_PER_AS:
173 if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
174 return 1;
175 else
176 return 0;
177 default:
178 return 0;
179 }
180}
181
6ff96d00
RW
182static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
183 enum bgp_addpath_strat addpath_type,
184 struct bgp_node *rn)
185{
186 struct bgp_path_info *pi;
187
188 idalloc_drain_pool(
189 bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
190 &(rn->tx_addpath.free_ids[addpath_type]));
191 for (pi = bgp_node_get_bgp_path_info(rn); pi; pi = pi->next) {
192 if (pi->tx_addpath.addpath_tx_id[addpath_type]
193 != IDALLOC_INVALID) {
194 idalloc_free(
195 bgp->tx_addpath
196 .id_allocators[afi][safi][addpath_type],
197 pi->tx_addpath.addpath_tx_id[addpath_type]);
198 pi->tx_addpath.addpath_tx_id[addpath_type] =
199 IDALLOC_INVALID;
200 }
201 }
202}
203
dcc68b5e
MS
204/*
205 * Purge all addpath ID's on a BGP instance associated with the addpath
206 * strategy, and afi/safi combination. This lets us let go of all memory held to
207 * track ID numbers associated with an addpath type not in use. Since
208 * post-bestpath ID processing is skipped for types not used, this is the only
209 * chance to free this data.
210 */
211static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
212 enum bgp_addpath_strat addpath_type)
213{
6ff96d00 214 struct bgp_node *rn, *nrn;
dcc68b5e
MS
215
216 for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
217 rn = bgp_route_next(rn)) {
6ff96d00
RW
218 if (safi == SAFI_MPLS_VPN) {
219 struct bgp_table *table;
220
221 table = bgp_node_get_bgp_table_info(rn);
222 if (!table)
223 continue;
224
225 for (nrn = bgp_table_top(table); nrn;
226 nrn = bgp_route_next(nrn))
227 bgp_addpath_flush_type_rn(bgp, afi, safi,
228 addpath_type, nrn);
229 } else {
230 bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type,
231 rn);
dcc68b5e
MS
232 }
233 }
234
235 idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
236 bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
237}
238
239/*
240 * Allocate an Addpath ID for the given type on a path, if necessary.
241 */
242static void bgp_addpath_populate_path(struct id_alloc *allocator,
243 struct bgp_path_info *path,
244 enum bgp_addpath_strat addpath_type)
245{
246 if (bgp_addpath_tx_path(addpath_type, path)) {
247 path->tx_addpath.addpath_tx_id[addpath_type] =
248 idalloc_allocate(allocator);
249 }
250}
251
252/*
253 * Compute addpath ID's on a BGP instance associated with the addpath strategy,
254 * and afi/safi combination. Since we won't waste the time computing addpath IDs
255 * for unused strategies, the first time a peer is configured to use a strategy,
256 * we have to backfill the data.
257 */
258static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
259 enum bgp_addpath_strat addpath_type)
260{
6ff96d00 261 struct bgp_node *rn, *nrn;
dcc68b5e
MS
262 char buf[200];
263 struct id_alloc *allocator;
264
265 snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
266 bgp_addpath_names(addpath_type)->config_name, (int)afi,
267 (int)safi);
268 buf[sizeof(buf) - 1] = '\0';
269 zlog_info("Computing addpath IDs for addpath type %s",
270 bgp_addpath_names(addpath_type)->human_name);
271
272 bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
273 idalloc_new(buf);
274
275 idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
276 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
277
278 allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];
279
280 for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
6ff96d00
RW
281 rn = bgp_route_next(rn)) {
282 struct bgp_path_info *bi;
283
284 if (safi == SAFI_MPLS_VPN) {
285 struct bgp_table *table;
286
287 table = bgp_node_get_bgp_table_info(rn);
288 if (!table)
289 continue;
290
291 for (nrn = bgp_table_top(table); nrn;
292 nrn = bgp_route_next(nrn))
293 for (bi = bgp_node_get_bgp_path_info(nrn); bi;
294 bi = bi->next)
295 bgp_addpath_populate_path(allocator, bi,
296 addpath_type);
297 } else {
298 for (bi = bgp_node_get_bgp_path_info(rn); bi;
299 bi = bi->next)
300 bgp_addpath_populate_path(allocator, bi,
301 addpath_type);
302 }
303 }
dcc68b5e
MS
304}
305
306/*
307 * Handle updates to a peer or group's addpath strategy. If after adjusting
308 * counts a addpath strategy is in use for the first time, or no longer in use,
309 * the IDs for that strategy will be populated or flushed.
310 */
311void bgp_addpath_type_changed(struct bgp *bgp)
312{
313 afi_t afi;
314 safi_t safi;
315 struct listnode *node, *nnode;
316 struct peer *peer;
317 int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
318 enum bgp_addpath_strat type;
319
320 FOREACH_AFI_SAFI(afi, safi) {
321 for (type=0; type<BGP_ADDPATH_MAX; type++) {
322 peer_count[afi][safi][type] = 0;
323 }
324 }
325
326 for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
327 FOREACH_AFI_SAFI(afi, safi) {
328 type = peer->addpath_type[afi][safi];
329 if (type != BGP_ADDPATH_NONE) {
330 peer_count[afi][safi][type] += 1;
331 }
332 }
333 }
334
335 FOREACH_AFI_SAFI(afi, safi) {
336 for (type=0; type<BGP_ADDPATH_MAX; type++) {
337 int old = bgp->tx_addpath.peercount[afi][safi][type];
338 int new = peer_count[afi][safi][type];
339
340 bgp->tx_addpath.peercount[afi][safi][type] = new;
341
342 if (old == 0 && new != 0) {
343 bgp_addpath_populate_type(bgp, afi, safi,
344 type);
345 } else if (old != 0 && new == 0) {
346 bgp_addpath_flush_type(bgp, afi, safi, type);
347 }
348 }
349 }
350}
351
/*
 * Change the addpath type assigned to a peer, or peer group. In addition to
 * adjusting the counts, peer sessions will be reset as needed to make the
 * change take effect.
 *
 * Recursive: when called on a peer group's conf peer, it re-invokes itself
 * for each group member still using the group's old type.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type)
{
	struct bgp *bgp = peer->bgp;
	enum bgp_addpath_strat old_type = peer->addpath_type[afi][safi];
	struct listnode *node, *nnode;
	struct peer *tmp_peer;
	struct peer_group *group;

	/* Nothing to do if the configured type is unchanged. */
	if (addpath_type == old_type)
		return;

	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		/* A "no" config on a group member inherits group */
		addpath_type = peer->group->conf->addpath_type[afi][safi];
	}

	peer->addpath_type[afi][safi] = addpath_type;

	/* Recompute per-strategy peer counts; may populate or flush IDs. */
	bgp_addpath_type_changed(bgp);

	if (addpath_type != BGP_ADDPATH_NONE) {
		if (bgp_addpath_dmed_required(addpath_type)) {
			/* best-per-AS needs deterministic MED; turn it on and
			 * rerun bestpath if it was off.
			 */
			if (!bgp_flag_check(bgp, BGP_FLAG_DETERMINISTIC_MED)) {
				zlog_warn(
					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
					peer->host);
				bgp_flag_set(bgp, BGP_FLAG_DETERMINISTIC_MED);
				bgp_recalculate_all_bestpaths(bgp);
			}
		}
	}

	zlog_info("Resetting peer %s%s due to change in addpath config",
		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
		  peer->host);

	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		group = peer->group;

		/* group will be null as peer_group_delete calls peer_delete on
		 * group->conf. That peer_delete will eventually end up here
		 * if the group was configured to tx addpaths.
		 */
		if (group != NULL) {
			/* Propagate the change (recursively) to members still
			 * on the group's old type; members with their own
			 * override are left alone.
			 */
			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
					       tmp_peer)) {
				if (tmp_peer->addpath_type[afi][safi] ==
				    old_type) {
					bgp_addpath_set_peer_type(tmp_peer,
								 afi,
								 safi,
								 addpath_type);
				}
			}
		}
	} else {
		/* Individual peer: reset the session so the new addpath
		 * capability takes effect.
		 */
		peer_change_action(peer, afi, safi, peer_change_reset);
	}

}
419
/*
 * Intended to run after bestpath. This function will take TX IDs from paths
 * that no longer need them, and give them to paths that do. This prevents
 * best-per-as updates from needing to do a separate withdraw and update just
 * to swap out which path is sent.
 *
 * Two passes per strategy, order matters: first return IDs from paths that
 * no longer qualify into the node's pool, then hand pooled IDs (preferred
 * over fresh ones) to newly-qualifying paths, so the ID "moves" with the
 * advertised role.
 */
void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_node *bn, afi_t afi,
			    safi_t safi)
{
	int i;
	struct bgp_path_info *pi;
	struct id_alloc_pool **pool_ptr;

	for (i = 0; i < BGP_ADDPATH_MAX; i++) {
		struct id_alloc *alloc =
			bgp->tx_addpath.id_allocators[afi][safi][i];
		pool_ptr = &(bn->tx_addpath.free_ids[i]);

		/* Skip strategies no peer uses; their allocators may not
		 * even exist (see bgp_addpath_flush_type).
		 */
		if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
			continue;

		/* Free Unused IDs back to the pool.*/
		for (pi = bgp_node_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
			    && !bgp_addpath_tx_path(i, pi)) {
				idalloc_free_to_pool(pool_ptr,
					pi->tx_addpath.addpath_tx_id[i]);
				pi->tx_addpath.addpath_tx_id[i] =
					IDALLOC_INVALID;
			}
		}

		/* Give IDs to paths that need them (pulling from the pool) */
		for (pi = bgp_node_get_bgp_path_info(bn); pi; pi = pi->next) {
			if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
			    && bgp_addpath_tx_path(i, pi)) {
				pi->tx_addpath.addpath_tx_id[i] =
					idalloc_allocate_prefer_pool(
						alloc, pool_ptr);
			}
		}

		/* Free any IDs left in the pool to the main allocator */
		idalloc_drain_pool(alloc, pool_ptr);
	}
}