]> git.proxmox.com Git - mirror_frr.git/blame - bgpd/bgp_addpath.c
bgpd: add addpath ID to adj_out tree sort
[mirror_frr.git] / bgpd / bgp_addpath.c
CommitLineData
dcc68b5e
MS
1/*
2 * Addpath TX ID selection, and related utilities
3 * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20#include "bgp_addpath.h"
21#include "bgp_route.h"
22
/* Display/config strings for each addpath TX strategy. Indexed by
 * enum bgp_addpath_strat, so entries must stay in enum order
 * (BGP_ADDPATH_MAX bounds the valid range — see bgp_addpath_names()).
 */
static struct bgp_addpath_strategy_names strat_names[BGP_ADDPATH_MAX] = {
	{
		.config_name = "addpath-tx-all-paths",
		.human_name = "All",
		.human_description = "Advertise all paths via addpath",
		.type_json_name = "addpathTxAllPaths",
		.id_json_name = "addpathTxIdAll"
	},
	{
		.config_name = "addpath-tx-bestpath-per-AS",
		.human_name = "Best-Per-AS",
		.human_description = "Advertise bestpath per AS via addpath",
		.type_json_name = "addpathTxBestpathPerAS",
		.id_json_name = "addpathTxIdBestPerAS"
	}
};
39
/* Fallback strings returned by bgp_addpath_names() for any strategy value
 * outside [0, BGP_ADDPATH_MAX) — keeps callers from ever seeing NULL.
 */
static struct bgp_addpath_strategy_names unknown_names = {
	.config_name = "addpath-tx-unknown",
	.human_name = "Unknown-Addpath-Strategy",
	.human_description = "Unknown Addpath Strategy",
	.type_json_name = "addpathTxUnknown",
	.id_json_name = "addpathTxIdUnknown"
};
47
48/*
49 * Returns a structure full of strings associated with an addpath type. Will
50 * never return null.
51 */
52struct bgp_addpath_strategy_names *
53bgp_addpath_names(enum bgp_addpath_strat strat)
54{
55 if (strat < BGP_ADDPATH_MAX)
56 return &(strat_names[strat]);
57 else
58 return &unknown_names;
59};
60
61/*
62 * Returns if any peer is transmitting addpaths for a given afi/safi.
63 */
64int bgp_addpath_is_addpath_used(struct bgp_addpath_bgp_data *d, afi_t afi,
65 safi_t safi)
66{
67 return d->total_peercount[afi][safi] > 0;
68}
69
70/*
71 * Initialize the BGP instance level data for addpath.
72 */
73void bgp_addpath_init_bgp_data(struct bgp_addpath_bgp_data *d)
74{
75 safi_t safi;
76 afi_t afi;
77 int i;
78
79 for (afi = AFI_IP; afi < AFI_MAX; afi++) {
80 for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
81 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
82 d->id_allocators[afi][safi][i] = NULL;
83 d->peercount[afi][safi][i] = 0;
84 }
85 d->total_peercount[afi][safi] = 0;
86 }
87 }
88}
89
90/*
91 * Free up resources associated with BGP route info structures.
92 */
93void bgp_addpath_free_info_data(struct bgp_addpath_info_data *d,
94 struct bgp_addpath_node_data *nd)
95{
96 int i;
97
98 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
99 if (d->addpath_tx_id[i] != IDALLOC_INVALID)
100 idalloc_free_to_pool(&nd->free_ids[i],
101 d->addpath_tx_id[i]);
102 }
103}
104
105/*
106 * Return the addpath ID used to send a particular route, to a particular peer,
107 * in a particular AFI/SAFI.
108 */
109uint32_t bgp_addpath_id_for_peer(struct peer *peer, afi_t afi, safi_t safi,
110 struct bgp_addpath_info_data *d)
111{
112 if (peer->addpath_type[afi][safi] < BGP_ADDPATH_MAX)
113 return d->addpath_tx_id[peer->addpath_type[afi][safi]];
114 else
115 return IDALLOC_INVALID;
116}
117
118/*
119 * Returns true if the path has an assigned addpath ID for any of the addpath
120 * strategies.
121 */
122int bgp_addpath_info_has_ids(struct bgp_addpath_info_data *d)
123{
124 int i;
125
126 for (i = 0; i < BGP_ADDPATH_MAX; i++)
127 if (d->addpath_tx_id[i] != 0)
128 return 1;
129
130 return 0;
131}
132
133/*
134 * Releases any ID's associated with the BGP prefix.
135 */
136void bgp_addpath_free_node_data(struct bgp_addpath_bgp_data *bd,
137 struct bgp_addpath_node_data *nd, afi_t afi,
138 safi_t safi)
139{
140 int i;
141
142 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
143 idalloc_drain_pool(bd->id_allocators[afi][safi][i],
144 &(nd->free_ids[i]));
145 }
146}
147
148/*
149 * Check to see if the addpath strategy requires DMED to be configured to work.
150 */
151int bgp_addpath_dmed_required(int strategy)
152{
153 return strategy == BGP_ADDPATH_BEST_PER_AS;
154}
155
156/*
157 * Return true if this is a path we should advertise due to a
158 * configured addpath-tx knob
159 */
160int bgp_addpath_tx_path(enum bgp_addpath_strat strat,
161 struct bgp_path_info *pi)
162{
163 switch (strat) {
164 case BGP_ADDPATH_NONE:
165 return 0;
166 case BGP_ADDPATH_ALL:
167 return 1;
168 case BGP_ADDPATH_BEST_PER_AS:
169 if (CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))
170 return 1;
171 else
172 return 0;
173 default:
174 return 0;
175 }
176}
177
/*
 * Release every addpath ID of the given strategy held by a single route
 * node: drain the node's free-ID pool back into the instance allocator,
 * then free (and invalidate) the IDs still assigned to paths on the node.
 */
static void bgp_addpath_flush_type_rn(struct bgp *bgp, afi_t afi, safi_t safi,
				      enum bgp_addpath_strat addpath_type,
				      struct bgp_node *rn)
{
	struct bgp_path_info *pi;

	/* Pooled (currently unassigned) IDs go straight back. */
	idalloc_drain_pool(
		bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
		&(rn->tx_addpath.free_ids[addpath_type]));
	for (pi = bgp_node_get_bgp_path_info(rn); pi; pi = pi->next) {
		if (pi->tx_addpath.addpath_tx_id[addpath_type]
		    != IDALLOC_INVALID) {
			idalloc_free(
				bgp->tx_addpath
					.id_allocators[afi][safi][addpath_type],
				pi->tx_addpath.addpath_tx_id[addpath_type]);
			/* Invalidate so a later flush/free won't double-free. */
			pi->tx_addpath.addpath_tx_id[addpath_type] =
				IDALLOC_INVALID;
		}
	}
}
199
dcc68b5e
MS
200/*
201 * Purge all addpath ID's on a BGP instance associated with the addpath
202 * strategy, and afi/safi combination. This lets us let go of all memory held to
203 * track ID numbers associated with an addpath type not in use. Since
204 * post-bestpath ID processing is skipped for types not used, this is the only
205 * chance to free this data.
206 */
207static void bgp_addpath_flush_type(struct bgp *bgp, afi_t afi, safi_t safi,
208 enum bgp_addpath_strat addpath_type)
209{
6ff96d00 210 struct bgp_node *rn, *nrn;
dcc68b5e
MS
211
212 for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
213 rn = bgp_route_next(rn)) {
6ff96d00
RW
214 if (safi == SAFI_MPLS_VPN) {
215 struct bgp_table *table;
216
217 table = bgp_node_get_bgp_table_info(rn);
218 if (!table)
219 continue;
220
221 for (nrn = bgp_table_top(table); nrn;
222 nrn = bgp_route_next(nrn))
223 bgp_addpath_flush_type_rn(bgp, afi, safi,
224 addpath_type, nrn);
225 } else {
226 bgp_addpath_flush_type_rn(bgp, afi, safi, addpath_type,
227 rn);
dcc68b5e
MS
228 }
229 }
230
231 idalloc_destroy(bgp->tx_addpath.id_allocators[afi][safi][addpath_type]);
232 bgp->tx_addpath.id_allocators[afi][safi][addpath_type] = NULL;
233}
234
235/*
236 * Allocate an Addpath ID for the given type on a path, if necessary.
237 */
238static void bgp_addpath_populate_path(struct id_alloc *allocator,
239 struct bgp_path_info *path,
240 enum bgp_addpath_strat addpath_type)
241{
242 if (bgp_addpath_tx_path(addpath_type, path)) {
243 path->tx_addpath.addpath_tx_id[addpath_type] =
244 idalloc_allocate(allocator);
245 }
246}
247
248/*
249 * Compute addpath ID's on a BGP instance associated with the addpath strategy,
250 * and afi/safi combination. Since we won't waste the time computing addpath IDs
251 * for unused strategies, the first time a peer is configured to use a strategy,
252 * we have to backfill the data.
253 */
254static void bgp_addpath_populate_type(struct bgp *bgp, afi_t afi, safi_t safi,
255 enum bgp_addpath_strat addpath_type)
256{
6ff96d00 257 struct bgp_node *rn, *nrn;
dcc68b5e
MS
258 char buf[200];
259 struct id_alloc *allocator;
260
261 snprintf(buf, sizeof(buf), "Addpath ID Allocator %s:%d/%d",
262 bgp_addpath_names(addpath_type)->config_name, (int)afi,
263 (int)safi);
264 buf[sizeof(buf) - 1] = '\0';
265 zlog_info("Computing addpath IDs for addpath type %s",
266 bgp_addpath_names(addpath_type)->human_name);
267
268 bgp->tx_addpath.id_allocators[afi][safi][addpath_type] =
269 idalloc_new(buf);
270
271 idalloc_reserve(bgp->tx_addpath.id_allocators[afi][safi][addpath_type],
272 BGP_ADDPATH_TX_ID_FOR_DEFAULT_ORIGINATE);
273
274 allocator = bgp->tx_addpath.id_allocators[afi][safi][addpath_type];
275
276 for (rn = bgp_table_top(bgp->rib[afi][safi]); rn;
6ff96d00
RW
277 rn = bgp_route_next(rn)) {
278 struct bgp_path_info *bi;
279
280 if (safi == SAFI_MPLS_VPN) {
281 struct bgp_table *table;
282
283 table = bgp_node_get_bgp_table_info(rn);
284 if (!table)
285 continue;
286
287 for (nrn = bgp_table_top(table); nrn;
288 nrn = bgp_route_next(nrn))
289 for (bi = bgp_node_get_bgp_path_info(nrn); bi;
290 bi = bi->next)
291 bgp_addpath_populate_path(allocator, bi,
292 addpath_type);
293 } else {
294 for (bi = bgp_node_get_bgp_path_info(rn); bi;
295 bi = bi->next)
296 bgp_addpath_populate_path(allocator, bi,
297 addpath_type);
298 }
299 }
dcc68b5e
MS
300}
301
302/*
303 * Handle updates to a peer or group's addpath strategy. If after adjusting
304 * counts a addpath strategy is in use for the first time, or no longer in use,
305 * the IDs for that strategy will be populated or flushed.
306 */
307void bgp_addpath_type_changed(struct bgp *bgp)
308{
309 afi_t afi;
310 safi_t safi;
311 struct listnode *node, *nnode;
312 struct peer *peer;
313 int peer_count[AFI_MAX][SAFI_MAX][BGP_ADDPATH_MAX];
314 enum bgp_addpath_strat type;
315
316 FOREACH_AFI_SAFI(afi, safi) {
317 for (type=0; type<BGP_ADDPATH_MAX; type++) {
318 peer_count[afi][safi][type] = 0;
319 }
1c34cc6b 320 bgp->tx_addpath.total_peercount[afi][safi] = 0;
dcc68b5e
MS
321 }
322
323 for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
324 FOREACH_AFI_SAFI(afi, safi) {
325 type = peer->addpath_type[afi][safi];
326 if (type != BGP_ADDPATH_NONE) {
327 peer_count[afi][safi][type] += 1;
1c34cc6b 328 bgp->tx_addpath.total_peercount[afi][safi] += 1;
dcc68b5e
MS
329 }
330 }
331 }
332
333 FOREACH_AFI_SAFI(afi, safi) {
334 for (type=0; type<BGP_ADDPATH_MAX; type++) {
335 int old = bgp->tx_addpath.peercount[afi][safi][type];
336 int new = peer_count[afi][safi][type];
337
338 bgp->tx_addpath.peercount[afi][safi][type] = new;
339
340 if (old == 0 && new != 0) {
341 bgp_addpath_populate_type(bgp, afi, safi,
342 type);
343 } else if (old != 0 && new == 0) {
344 bgp_addpath_flush_type(bgp, afi, safi, type);
345 }
346 }
347 }
348}
349
/*
 * Change the addpath type assigned to a peer, or peer group. In addition to
 * adjusting the counts, peer sessions will be reset as needed to make the
 * change take effect. Recurses over group members when called on a group.
 */
void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi,
			       enum bgp_addpath_strat addpath_type)
{
	struct bgp *bgp = peer->bgp;
	enum bgp_addpath_strat old_type = peer->addpath_type[afi][safi];
	struct listnode *node, *nnode;
	struct peer *tmp_peer;
	struct peer_group *group;

	if (addpath_type == old_type)
		return;

	if (addpath_type == BGP_ADDPATH_NONE && peer->group &&
	    !CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		/* A "no" config on a group member inherits group */
		addpath_type = peer->group->conf->addpath_type[afi][safi];
	}

	peer->addpath_type[afi][safi] = addpath_type;

	/* Recount strategy users; may populate or flush ID allocators. */
	bgp_addpath_type_changed(bgp);

	if (addpath_type != BGP_ADDPATH_NONE) {
		if (bgp_addpath_dmed_required(addpath_type)) {
			/* best-per-AS only works with deterministic-med;
			 * force it on and recompute bestpaths if needed.
			 */
			if (!bgp_flag_check(bgp, BGP_FLAG_DETERMINISTIC_MED)) {
				zlog_warn(
					"%s: enabling bgp deterministic-med, this is required for addpath-tx-bestpath-per-AS",
					peer->host);
				bgp_flag_set(bgp, BGP_FLAG_DETERMINISTIC_MED);
				bgp_recalculate_all_bestpaths(bgp);
			}
		}
	}

	zlog_info("Resetting peer %s%s due to change in addpath config",
		  CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP) ? "group " : "",
		  peer->host);

	if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
		group = peer->group;

		/* group will be null as peer_group_delete calls peer_delete on
		 * group->conf. That peer_delete will eventuallly end up here
		 * if the group was configured to tx addpaths.
		 */
		if (group != NULL) {
			/* Propagate only to members still on the old type;
			 * members with their own override are untouched.
			 */
			for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
					       tmp_peer)) {
				if (tmp_peer->addpath_type[afi][safi] ==
				    old_type) {
					bgp_addpath_set_peer_type(tmp_peer,
								  afi,
								  safi,
								  addpath_type);
				}
			}
		}
	} else {
		/* Single peer: reset the session so the change takes effect. */
		peer_change_action(peer, afi, safi, peer_change_reset);
	}

}
417
418/*
419 * Intended to run after bestpath. This function will take TX IDs from paths
420 * that no longer need them, and give them to paths that do. This prevents
421 * best-per-as updates from needing to do a separate withdraw and update just to
422 * swap out which path is sent.
423 */
424void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_node *bn, afi_t afi,
425 safi_t safi)
426{
427 int i;
428 struct bgp_path_info *pi;
429 struct id_alloc_pool **pool_ptr;
430
431 for (i = 0; i < BGP_ADDPATH_MAX; i++) {
432 struct id_alloc *alloc =
433 bgp->tx_addpath.id_allocators[afi][safi][i];
434 pool_ptr = &(bn->tx_addpath.free_ids[i]);
435
436 if (bgp->tx_addpath.peercount[afi][safi][i] == 0)
437 continue;
438
439 /* Free Unused IDs back to the pool.*/
6f94b685 440 for (pi = bgp_node_get_bgp_path_info(bn); pi; pi = pi->next) {
dcc68b5e
MS
441 if (pi->tx_addpath.addpath_tx_id[i] != IDALLOC_INVALID
442 && !bgp_addpath_tx_path(i, pi)) {
443 idalloc_free_to_pool(pool_ptr,
444 pi->tx_addpath.addpath_tx_id[i]);
445 pi->tx_addpath.addpath_tx_id[i] =
446 IDALLOC_INVALID;
447 }
448 }
449
450 /* Give IDs to paths that need them (pulling from the pool) */
6f94b685 451 for (pi = bgp_node_get_bgp_path_info(bn); pi; pi = pi->next) {
dcc68b5e
MS
452 if (pi->tx_addpath.addpath_tx_id[i] == IDALLOC_INVALID
453 && bgp_addpath_tx_path(i, pi)) {
454 pi->tx_addpath.addpath_tx_id[i] =
455 idalloc_allocate_prefer_pool(
456 alloc, pool_ptr);
457 }
458 }
459
460 /* Free any IDs left in the pool to the main allocator */
461 idalloc_drain_pool(alloc, pool_ptr);
462 }
463}